OLD | NEW |
| (Empty) |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "base/basictypes.h" | |
6 #include "base/files/file_util.h" | |
7 #include "base/metrics/field_trial.h" | |
8 #include "base/port.h" | |
9 #include "base/run_loop.h" | |
10 #include "base/strings/string_util.h" | |
11 #include "base/strings/stringprintf.h" | |
12 #include "base/third_party/dynamic_annotations/dynamic_annotations.h" | |
13 #include "base/thread_task_runner_handle.h" | |
14 #include "base/threading/platform_thread.h" | |
15 #include "base/threading/thread_restrictions.h" | |
16 #include "net/base/cache_type.h" | |
17 #include "net/base/io_buffer.h" | |
18 #include "net/base/net_errors.h" | |
19 #include "net/base/test_completion_callback.h" | |
20 #include "net/disk_cache/blockfile/backend_impl.h" | |
21 #include "net/disk_cache/blockfile/entry_impl.h" | |
22 #include "net/disk_cache/blockfile/experiments.h" | |
23 #include "net/disk_cache/blockfile/histogram_macros.h" | |
24 #include "net/disk_cache/blockfile/mapped_file.h" | |
25 #include "net/disk_cache/cache_util.h" | |
26 #include "net/disk_cache/disk_cache_test_base.h" | |
27 #include "net/disk_cache/disk_cache_test_util.h" | |
28 #include "net/disk_cache/memory/mem_backend_impl.h" | |
29 #include "net/disk_cache/simple/simple_backend_impl.h" | |
30 #include "net/disk_cache/simple/simple_entry_format.h" | |
31 #include "net/disk_cache/simple/simple_test_util.h" | |
32 #include "net/disk_cache/simple/simple_util.h" | |
33 #include "testing/gtest/include/gtest/gtest.h" | |
34 | |
35 #if defined(OS_WIN) | |
36 #include "base/win/scoped_handle.h" | |
37 #endif | |
38 | |
39 // Provide a BackendImpl object to macros from histogram_macros.h. | |
40 #define CACHE_UMA_BACKEND_IMPL_OBJ backend_ | |
41 | |
42 using base::Time; | |
43 | |
44 namespace { | |
45 | |
46 const char kExistingEntryKey[] = "existing entry key"; | |
47 | |
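// Builds a blockfile cache at |cache_path| that already contains a single | |
// entry keyed |kExistingEntryKey|; returns an empty pointer if either the | |
// backend cannot be initialized or the entry cannot be created. | |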
48 scoped_ptr<disk_cache::BackendImpl> CreateExistingEntryCache( | |
49 const base::Thread& cache_thread, | |
50 base::FilePath& cache_path) { | |
51 net::TestCompletionCallback cb; | |
52 | |
53 scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl( | |
54 cache_path, cache_thread.message_loop_proxy(), NULL)); | |
55 int rv = cache->Init(cb.callback()); | |
56 if (cb.GetResult(rv) != net::OK) | |
57 return scoped_ptr<disk_cache::BackendImpl>(); | |
58 | |
59 disk_cache::Entry* entry = NULL; | |
60 rv = cache->CreateEntry(kExistingEntryKey, &entry, cb.callback()); | |
61 if (cb.GetResult(rv) != net::OK) | |
62 return scoped_ptr<disk_cache::BackendImpl>(); | |
63 entry->Close(); | |
64 | |
65 return cache.Pass(); | |
66 } | |
67 | |
68 } // namespace | |
69 | |
70 // Tests that can run with different types of caches. | |
71 class DiskCacheBackendTest : public DiskCacheTestWithCache { | |
72 protected: | |
73 // Some utility methods: | |
74 | |
75 // Performs IO operations on the cache until there is pending IO. | |
76 int GeneratePendingIO(net::TestCompletionCallback* cb); | |
77 | |
78 // Adds 5 sparse entries. |doomed_start| and |doomed_end|, if not NULL, | |
79 // are filled with the times used by DoomEntriesSince and DoomEntriesBetween. | |
80 // There are 4 entries after |doomed_start| and 2 after |doomed_end|. | |
81 void InitSparseCache(base::Time* doomed_start, base::Time* doomed_end); | |
82 | |
83 bool CreateSetOfRandomEntries(std::set<std::string>* key_pool); | |
84 bool EnumerateAndMatchKeys(int max_to_open, | |
85 TestIterator* iter, | |
86 std::set<std::string>* keys_to_match, | |
87 size_t* count); | |
88 | |
89 // Actual tests: | |
90 void BackendBasics(); | |
91 void BackendKeying(); | |
92 void BackendShutdownWithPendingFileIO(bool fast); | |
93 void BackendShutdownWithPendingIO(bool fast); | |
94 void BackendShutdownWithPendingCreate(bool fast); | |
95 void BackendSetSize(); | |
96 void BackendLoad(); | |
97 void BackendChain(); | |
98 void BackendValidEntry(); | |
99 void BackendInvalidEntry(); | |
100 void BackendInvalidEntryRead(); | |
101 void BackendInvalidEntryWithLoad(); | |
102 void BackendTrimInvalidEntry(); | |
103 void BackendTrimInvalidEntry2(); | |
104 void BackendEnumerations(); | |
105 void BackendEnumerations2(); | |
106 void BackendInvalidEntryEnumeration(); | |
107 void BackendFixEnumerators(); | |
108 void BackendDoomRecent(); | |
109 void BackendDoomBetween(); | |
110 void BackendTransaction(const std::string& name, int num_entries, bool load); | |
111 void BackendRecoverInsert(); | |
112 void BackendRecoverRemove(); | |
113 void BackendRecoverWithEviction(); | |
114 void BackendInvalidEntry2(); | |
115 void BackendInvalidEntry3(); | |
116 void BackendInvalidEntry7(); | |
117 void BackendInvalidEntry8(); | |
118 void BackendInvalidEntry9(bool eviction); | |
119 void BackendInvalidEntry10(bool eviction); | |
120 void BackendInvalidEntry11(bool eviction); | |
121 void BackendTrimInvalidEntry12(); | |
122 void BackendDoomAll(); | |
123 void BackendDoomAll2(); | |
124 void BackendInvalidRankings(); | |
125 void BackendInvalidRankings2(); | |
126 void BackendDisable(); | |
127 void BackendDisable2(); | |
128 void BackendDisable3(); | |
129 void BackendDisable4(); | |
130 }; | |
131 | |
132 int DiskCacheBackendTest::GeneratePendingIO(net::TestCompletionCallback* cb) { | |
133 if (!use_current_thread_) { | |
134 ADD_FAILURE(); | |
135 return net::ERR_FAILED; | |
136 } | |
137 | |
138 disk_cache::Entry* entry; | |
139 int rv = cache_->CreateEntry("some key", &entry, cb->callback()); | |
140 if (cb->GetResult(rv) != net::OK) | |
141 return net::ERR_CACHE_CREATE_FAILURE; | |
142 | |
143 const int kSize = 25000; | |
144 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize)); | |
145 CacheTestFillBuffer(buffer->data(), kSize, false); | |
146 | |
147 for (int i = 0; i < 10 * 1024 * 1024; i += 64 * 1024) { | |
148 // We are using the current thread as the cache thread because we want to | |
149 // be able to call this method directly and make sure that it is the OS | |
150 // (not a thread switch on our part) that returns IO pending. | |
151 if (!simple_cache_mode_) { | |
152 rv = static_cast<disk_cache::EntryImpl*>(entry)->WriteDataImpl( | |
153 0, i, buffer.get(), kSize, cb->callback(), false); | |
154 } else { | |
155 rv = entry->WriteData(0, i, buffer.get(), kSize, cb->callback(), false); | |
156 } | |
157 | |
158 if (rv == net::ERR_IO_PENDING) | |
159 break; | |
160 if (rv != kSize) | |
161 rv = net::ERR_FAILED; | |
162 } | |
163 | |
164 // Don't call Close() to avoid going through the queue or we'll deadlock | |
165 // waiting for the operation to finish. | |
166 if (!simple_cache_mode_) | |
167 static_cast<disk_cache::EntryImpl*>(entry)->Release(); | |
168 else | |
169 entry->Close(); | |
170 | |
171 return rv; | |
172 } | |
173 | |
174 void DiskCacheBackendTest::InitSparseCache(base::Time* doomed_start, | |
175 base::Time* doomed_end) { | |
176 InitCache(); | |
177 | |
178 const int kSize = 50; | |
179 // This must be greater than MemEntryImpl::kMaxSparseEntrySize. | |
180 const int kOffset = 10 + 1024 * 1024; | |
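// (Writing past that limit splits each entry's sparse data into separate | |
// child entries, which is why the comments below refer to each key as two | |
// "parts" in the rankings list.) | |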
181 | |
182 disk_cache::Entry* entry0 = NULL; | |
183 disk_cache::Entry* entry1 = NULL; | |
184 disk_cache::Entry* entry2 = NULL; | |
185 | |
186 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize)); | |
187 CacheTestFillBuffer(buffer->data(), kSize, false); | |
188 | |
189 ASSERT_EQ(net::OK, CreateEntry("zeroth", &entry0)); | |
190 ASSERT_EQ(kSize, WriteSparseData(entry0, 0, buffer.get(), kSize)); | |
191 ASSERT_EQ(kSize, | |
192 WriteSparseData(entry0, kOffset + kSize, buffer.get(), kSize)); | |
193 entry0->Close(); | |
194 | |
195 FlushQueueForTest(); | |
196 AddDelay(); | |
197 if (doomed_start) | |
198 *doomed_start = base::Time::Now(); | |
199 | |
200 // Order in rankings list: | |
201 // first_part1, first_part2, second_part1, second_part2 | |
202 ASSERT_EQ(net::OK, CreateEntry("first", &entry1)); | |
203 ASSERT_EQ(kSize, WriteSparseData(entry1, 0, buffer.get(), kSize)); | |
204 ASSERT_EQ(kSize, | |
205 WriteSparseData(entry1, kOffset + kSize, buffer.get(), kSize)); | |
206 entry1->Close(); | |
207 | |
208 ASSERT_EQ(net::OK, CreateEntry("second", &entry2)); | |
209 ASSERT_EQ(kSize, WriteSparseData(entry2, 0, buffer.get(), kSize)); | |
210 ASSERT_EQ(kSize, | |
211 WriteSparseData(entry2, kOffset + kSize, buffer.get(), kSize)); | |
212 entry2->Close(); | |
213 | |
214 FlushQueueForTest(); | |
215 AddDelay(); | |
216 if (doomed_end) | |
217 *doomed_end = base::Time::Now(); | |
218 | |
219 // Order in rankings list: | |
220 // third_part1, fourth_part1, third_part2, fourth_part2 | |
221 disk_cache::Entry* entry3 = NULL; | |
222 disk_cache::Entry* entry4 = NULL; | |
223 ASSERT_EQ(net::OK, CreateEntry("third", &entry3)); | |
224 ASSERT_EQ(kSize, WriteSparseData(entry3, 0, buffer.get(), kSize)); | |
225 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry4)); | |
226 ASSERT_EQ(kSize, WriteSparseData(entry4, 0, buffer.get(), kSize)); | |
227 ASSERT_EQ(kSize, | |
228 WriteSparseData(entry3, kOffset + kSize, buffer.get(), kSize)); | |
229 ASSERT_EQ(kSize, | |
230 WriteSparseData(entry4, kOffset + kSize, buffer.get(), kSize)); | |
231 entry3->Close(); | |
232 entry4->Close(); | |
233 | |
234 FlushQueueForTest(); | |
235 AddDelay(); | |
236 } | |
237 | |
238 // Creates entries based on random keys. Stores these keys in |key_pool|. | |
239 bool DiskCacheBackendTest::CreateSetOfRandomEntries( | |
240 std::set<std::string>* key_pool) { | |
241 const int kNumEntries = 10; | |
242 | |
243 for (int i = 0; i < kNumEntries; ++i) { | |
244 std::string key = GenerateKey(true); | |
245 disk_cache::Entry* entry; | |
246 if (CreateEntry(key, &entry) != net::OK) | |
247 return false; | |
248 key_pool->insert(key); | |
249 entry->Close(); | |
250 } | |
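// Succeeds only if the cache reports exactly as many entries as there are | |
// keys in |key_pool|. | |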
251 return key_pool->size() == implicit_cast<size_t>(cache_->GetEntryCount()); | |
252 } | |
253 | |
254 // Performs iteration over the backend and checks that the keys of entries | |
255 // opened are in |keys_to_match|, then erases them. Up to |max_to_open| entries | |
256 // will be opened, if it is positive. Otherwise, iteration will continue until | |
257 // OpenNextEntry stops returning net::OK. | |
258 bool DiskCacheBackendTest::EnumerateAndMatchKeys( | |
259 int max_to_open, | |
260 TestIterator* iter, | |
261 std::set<std::string>* keys_to_match, | |
262 size_t* count) { | |
263 disk_cache::Entry* entry; | |
264 | |
265 if (!iter) | |
266 return false; | |
267 while (iter->OpenNextEntry(&entry) == net::OK) { | |
268 if (!entry) | |
269 return false; | |
270 EXPECT_EQ(1U, keys_to_match->erase(entry->GetKey())); | |
271 entry->Close(); | |
272 ++(*count); | |
273 if (max_to_open >= 0 && implicit_cast<int>(*count) >= max_to_open) | |
274 break; | |
275 } | |
276 | |
277 return true; | |
278 } | |
279 | |
280 void DiskCacheBackendTest::BackendBasics() { | |
281 InitCache(); | |
282 disk_cache::Entry *entry1 = NULL, *entry2 = NULL; | |
283 EXPECT_NE(net::OK, OpenEntry("the first key", &entry1)); | |
284 ASSERT_EQ(net::OK, CreateEntry("the first key", &entry1)); | |
285 ASSERT_TRUE(NULL != entry1); | |
286 entry1->Close(); | |
287 entry1 = NULL; | |
288 | |
289 ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1)); | |
290 ASSERT_TRUE(NULL != entry1); | |
291 entry1->Close(); | |
292 entry1 = NULL; | |
293 | |
294 EXPECT_NE(net::OK, CreateEntry("the first key", &entry1)); | |
295 ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1)); | |
296 EXPECT_NE(net::OK, OpenEntry("some other key", &entry2)); | |
297 ASSERT_EQ(net::OK, CreateEntry("some other key", &entry2)); | |
298 ASSERT_TRUE(NULL != entry1); | |
299 ASSERT_TRUE(NULL != entry2); | |
300 EXPECT_EQ(2, cache_->GetEntryCount()); | |
301 | |
302 disk_cache::Entry* entry3 = NULL; | |
303 ASSERT_EQ(net::OK, OpenEntry("some other key", &entry3)); | |
304 ASSERT_TRUE(NULL != entry3); | |
305 EXPECT_TRUE(entry2 == entry3); | |
306 EXPECT_EQ(2, cache_->GetEntryCount()); | |
307 | |
308 EXPECT_EQ(net::OK, DoomEntry("some other key")); | |
309 EXPECT_EQ(1, cache_->GetEntryCount()); | |
310 entry1->Close(); | |
311 entry2->Close(); | |
312 entry3->Close(); | |
313 | |
314 EXPECT_EQ(net::OK, DoomEntry("the first key")); | |
315 EXPECT_EQ(0, cache_->GetEntryCount()); | |
316 | |
317 ASSERT_EQ(net::OK, CreateEntry("the first key", &entry1)); | |
318 ASSERT_EQ(net::OK, CreateEntry("some other key", &entry2)); | |
319 entry1->Doom(); | |
320 entry1->Close(); | |
321 EXPECT_EQ(net::OK, DoomEntry("some other key")); | |
322 EXPECT_EQ(0, cache_->GetEntryCount()); | |
323 entry2->Close(); | |
324 } | |
325 | |
326 TEST_F(DiskCacheBackendTest, Basics) { | |
327 BackendBasics(); | |
328 } | |
329 | |
330 TEST_F(DiskCacheBackendTest, NewEvictionBasics) { | |
331 SetNewEviction(); | |
332 BackendBasics(); | |
333 } | |
334 | |
335 TEST_F(DiskCacheBackendTest, MemoryOnlyBasics) { | |
336 SetMemoryOnlyMode(); | |
337 BackendBasics(); | |
338 } | |
339 | |
340 TEST_F(DiskCacheBackendTest, AppCacheBasics) { | |
341 SetCacheType(net::APP_CACHE); | |
342 BackendBasics(); | |
343 } | |
344 | |
345 TEST_F(DiskCacheBackendTest, ShaderCacheBasics) { | |
346 SetCacheType(net::SHADER_CACHE); | |
347 BackendBasics(); | |
348 } | |
349 | |
350 void DiskCacheBackendTest::BackendKeying() { | |
351 InitCache(); | |
352 const char kName1[] = "the first key"; | |
353 const char kName2[] = "the first Key"; | |
354 disk_cache::Entry *entry1, *entry2; | |
355 ASSERT_EQ(net::OK, CreateEntry(kName1, &entry1)); | |
356 | |
357 ASSERT_EQ(net::OK, CreateEntry(kName2, &entry2)); | |
358 EXPECT_TRUE(entry1 != entry2) << "Case sensitive"; | |
359 entry2->Close(); | |
360 | |
361 char buffer[30]; | |
362 base::strlcpy(buffer, kName1, arraysize(buffer)); | |
363 ASSERT_EQ(net::OK, OpenEntry(buffer, &entry2)); | |
364 EXPECT_TRUE(entry1 == entry2); | |
365 entry2->Close(); | |
366 | |
367 base::strlcpy(buffer + 1, kName1, arraysize(buffer) - 1); | |
368 ASSERT_EQ(net::OK, OpenEntry(buffer + 1, &entry2)); | |
369 EXPECT_TRUE(entry1 == entry2); | |
370 entry2->Close(); | |
371 | |
372 base::strlcpy(buffer + 3, kName1, arraysize(buffer) - 3); | |
373 ASSERT_EQ(net::OK, OpenEntry(buffer + 3, &entry2)); | |
374 EXPECT_TRUE(entry1 == entry2); | |
375 entry2->Close(); | |
376 | |
377 // Now verify long keys. | |
378 char buffer2[20000]; | |
379 memset(buffer2, 's', sizeof(buffer2)); | |
380 buffer2[1023] = '\0'; | |
381 ASSERT_EQ(net::OK, CreateEntry(buffer2, &entry2)) << "key on block file"; | |
382 entry2->Close(); | |
383 | |
384 buffer2[1023] = 'g'; | |
385 buffer2[19999] = '\0'; | |
386 ASSERT_EQ(net::OK, CreateEntry(buffer2, &entry2)) << "key on external file"; | |
387 entry2->Close(); | |
388 entry1->Close(); | |
389 } | |
390 | |
391 TEST_F(DiskCacheBackendTest, Keying) { | |
392 BackendKeying(); | |
393 } | |
394 | |
395 TEST_F(DiskCacheBackendTest, NewEvictionKeying) { | |
396 SetNewEviction(); | |
397 BackendKeying(); | |
398 } | |
399 | |
400 TEST_F(DiskCacheBackendTest, MemoryOnlyKeying) { | |
401 SetMemoryOnlyMode(); | |
402 BackendKeying(); | |
403 } | |
404 | |
405 TEST_F(DiskCacheBackendTest, AppCacheKeying) { | |
406 SetCacheType(net::APP_CACHE); | |
407 BackendKeying(); | |
408 } | |
409 | |
410 TEST_F(DiskCacheBackendTest, ShaderCacheKeying) { | |
411 SetCacheType(net::SHADER_CACHE); | |
412 BackendKeying(); | |
413 } | |
414 | |
415 TEST_F(DiskCacheTest, CreateBackend) { | |
416 net::TestCompletionCallback cb; | |
417 | |
418 { | |
419 ASSERT_TRUE(CleanupCacheDir()); | |
420 base::Thread cache_thread("CacheThread"); | |
421 ASSERT_TRUE(cache_thread.StartWithOptions( | |
422 base::Thread::Options(base::MessageLoop::TYPE_IO, 0))); | |
423 | |
424 // Test the private factory method(s). | |
425 scoped_ptr<disk_cache::Backend> cache; | |
426 cache = disk_cache::MemBackendImpl::CreateBackend(0, NULL); | |
427 ASSERT_TRUE(cache.get()); | |
428 cache.reset(); | |
429 | |
430 // Now test the public API. | |
431 int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE, | |
432 net::CACHE_BACKEND_DEFAULT, | |
433 cache_path_, | |
434 0, | |
435 false, | |
436 cache_thread.task_runner(), | |
437 NULL, | |
438 &cache, | |
439 cb.callback()); | |
440 ASSERT_EQ(net::OK, cb.GetResult(rv)); | |
441 ASSERT_TRUE(cache.get()); | |
442 cache.reset(); | |
443 | |
444 rv = disk_cache::CreateCacheBackend(net::MEMORY_CACHE, | |
445 net::CACHE_BACKEND_DEFAULT, | |
446 base::FilePath(), 0, | |
447 false, NULL, NULL, &cache, | |
448 cb.callback()); | |
449 ASSERT_EQ(net::OK, cb.GetResult(rv)); | |
450 ASSERT_TRUE(cache.get()); | |
451 cache.reset(); | |
452 } | |
453 | |
454 base::MessageLoop::current()->RunUntilIdle(); | |
455 } | |
456 | |
457 // Tests that |BackendImpl| fails to initialize with a missing file. | |
458 TEST_F(DiskCacheBackendTest, CreateBackend_MissingFile) { | |
459 ASSERT_TRUE(CopyTestCache("bad_entry")); | |
460 base::FilePath filename = cache_path_.AppendASCII("data_1"); | |
461 base::DeleteFile(filename, false); | |
462 base::Thread cache_thread("CacheThread"); | |
463 ASSERT_TRUE(cache_thread.StartWithOptions( | |
464 base::Thread::Options(base::MessageLoop::TYPE_IO, 0))); | |
465 net::TestCompletionCallback cb; | |
466 | |
467 bool prev = base::ThreadRestrictions::SetIOAllowed(false); | |
468 scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl( | |
469 cache_path_, cache_thread.task_runner(), NULL)); | |
470 int rv = cache->Init(cb.callback()); | |
471 EXPECT_EQ(net::ERR_FAILED, cb.GetResult(rv)); | |
472 base::ThreadRestrictions::SetIOAllowed(prev); | |
473 | |
474 cache.reset(); | |
475 DisableIntegrityCheck(); | |
476 } | |
477 | |
478 TEST_F(DiskCacheBackendTest, ExternalFiles) { | |
479 InitCache(); | |
480 // First, let's create a file in the cache folder. | |
481 base::FilePath filename = cache_path_.AppendASCII("f_000001"); | |
482 | |
483 const int kSize = 50; | |
484 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize)); | |
485 CacheTestFillBuffer(buffer1->data(), kSize, false); | |
486 ASSERT_EQ(kSize, base::WriteFile(filename, buffer1->data(), kSize)); | |
487 | |
488 // Now let's create a file with the cache. | |
489 disk_cache::Entry* entry; | |
490 ASSERT_EQ(net::OK, CreateEntry("key", &entry)); | |
491 ASSERT_EQ(0, WriteData(entry, 0, 20000, buffer1.get(), 0, false)); | |
492 entry->Close(); | |
493 | |
494 // And verify that the first file is still there. | |
495 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize)); | |
496 ASSERT_EQ(kSize, base::ReadFile(filename, buffer2->data(), kSize)); | |
497 EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kSize)); | |
498 } | |
499 | |
500 // Tests that we deal with file-level pending operations at destruction time. | |
501 void DiskCacheBackendTest::BackendShutdownWithPendingFileIO(bool fast) { | |
502 ASSERT_TRUE(CleanupCacheDir()); | |
503 uint32 flags = disk_cache::kNoBuffering; | |
504 if (!fast) | |
505 flags |= disk_cache::kNoRandom; | |
506 | |
507 UseCurrentThread(); | |
508 CreateBackend(flags, NULL); | |
509 | |
510 net::TestCompletionCallback cb; | |
511 int rv = GeneratePendingIO(&cb); | |
512 | |
513 // The cache destructor will see one pending operation here. | |
514 cache_.reset(); | |
515 | |
516 if (rv == net::ERR_IO_PENDING) { | |
517 if (fast || simple_cache_mode_) | |
518 EXPECT_FALSE(cb.have_result()); | |
519 else | |
520 EXPECT_TRUE(cb.have_result()); | |
521 } | |
522 | |
523 base::MessageLoop::current()->RunUntilIdle(); | |
524 | |
525 #if !defined(OS_IOS) | |
526 // Wait for the actual operation to complete, or we'll keep a file handle that | |
527 // may cause issues later. Note that on iOS, even though this test | |
528 // uses a single thread, the actual IO is posted to a worker thread and the | |
529 // cache destructor breaks the link to reach cb when the operation completes. | |
530 rv = cb.GetResult(rv); | |
531 #endif | |
532 } | |
533 | |
534 TEST_F(DiskCacheBackendTest, ShutdownWithPendingFileIO) { | |
535 BackendShutdownWithPendingFileIO(false); | |
536 } | |
537 | |
538 // Here and below, tests that simulate crashes are not compiled in LeakSanitizer | |
539 // builds because they contain a lot of intentional memory leaks. | |
540 // The wrapper scripts used to run tests under Valgrind Memcheck will also | |
541 // disable these tests. See: | |
542 // tools/valgrind/gtest_exclude/net_unittests.gtest-memcheck.txt | |
543 #if !defined(LEAK_SANITIZER) | |
544 // We'll be leaking from this test. | |
545 TEST_F(DiskCacheBackendTest, ShutdownWithPendingFileIO_Fast) { | |
546 // The integrity test sets kNoRandom so there's a version mismatch if we don't | |
547 // force new eviction. | |
548 SetNewEviction(); | |
549 BackendShutdownWithPendingFileIO(true); | |
550 } | |
551 #endif | |
552 | |
553 // See crbug.com/330074 | |
554 #if !defined(OS_IOS) | |
555 // Tests that one cache instance is not affected by another one going away. | |
556 TEST_F(DiskCacheBackendTest, MultipleInstancesWithPendingFileIO) { | |
557 base::ScopedTempDir store; | |
558 ASSERT_TRUE(store.CreateUniqueTempDir()); | |
559 | |
560 net::TestCompletionCallback cb; | |
561 scoped_ptr<disk_cache::Backend> extra_cache; | |
562 int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE, | |
563 net::CACHE_BACKEND_DEFAULT, | |
564 store.path(), | |
565 0, | |
566 false, | |
567 base::ThreadTaskRunnerHandle::Get(), | |
568 NULL, | |
569 &extra_cache, | |
570 cb.callback()); | |
571 ASSERT_EQ(net::OK, cb.GetResult(rv)); | |
572 ASSERT_TRUE(extra_cache.get() != NULL); | |
573 | |
574 ASSERT_TRUE(CleanupCacheDir()); | |
575 SetNewEviction(); // Match the expected behavior for integrity verification. | |
576 UseCurrentThread(); | |
577 | |
578 CreateBackend(disk_cache::kNoBuffering, NULL); | |
579 rv = GeneratePendingIO(&cb); | |
580 | |
581 // cache_ has a pending operation, and extra_cache will go away. | |
582 extra_cache.reset(); | |
583 | |
584 if (rv == net::ERR_IO_PENDING) | |
585 EXPECT_FALSE(cb.have_result()); | |
586 | |
587 base::MessageLoop::current()->RunUntilIdle(); | |
588 | |
589 // Wait for the actual operation to complete, or we'll keep a file handle that | |
590 // may cause issues later. | |
591 rv = cb.GetResult(rv); | |
592 } | |
593 #endif | |
594 | |
595 // Tests that we deal with background-thread pending operations. | |
596 void DiskCacheBackendTest::BackendShutdownWithPendingIO(bool fast) { | |
597 net::TestCompletionCallback cb; | |
598 | |
599 { | |
600 ASSERT_TRUE(CleanupCacheDir()); | |
601 base::Thread cache_thread("CacheThread"); | |
602 ASSERT_TRUE(cache_thread.StartWithOptions( | |
603 base::Thread::Options(base::MessageLoop::TYPE_IO, 0))); | |
604 | |
605 uint32 flags = disk_cache::kNoBuffering; | |
606 if (!fast) | |
607 flags |= disk_cache::kNoRandom; | |
608 | |
609 CreateBackend(flags, &cache_thread); | |
610 | |
611 disk_cache::Entry* entry; | |
612 int rv = cache_->CreateEntry("some key", &entry, cb.callback()); | |
613 ASSERT_EQ(net::OK, cb.GetResult(rv)); | |
614 | |
615 entry->Close(); | |
616 | |
617 // The cache destructor will see one pending operation here. | |
618 cache_.reset(); | |
619 } | |
620 | |
621 base::MessageLoop::current()->RunUntilIdle(); | |
622 } | |
623 | |
624 TEST_F(DiskCacheBackendTest, ShutdownWithPendingIO) { | |
625 BackendShutdownWithPendingIO(false); | |
626 } | |
627 | |
628 #if !defined(LEAK_SANITIZER) | |
629 // We'll be leaking from this test. | |
630 TEST_F(DiskCacheBackendTest, ShutdownWithPendingIO_Fast) { | |
631 // The integrity test sets kNoRandom so there's a version mismatch if we don't | |
632 // force new eviction. | |
633 SetNewEviction(); | |
634 BackendShutdownWithPendingIO(true); | |
635 } | |
636 #endif | |
637 | |
638 // Tests that we deal with create-type pending operations. | |
639 void DiskCacheBackendTest::BackendShutdownWithPendingCreate(bool fast) { | |
640 net::TestCompletionCallback cb; | |
641 | |
642 { | |
643 ASSERT_TRUE(CleanupCacheDir()); | |
644 base::Thread cache_thread("CacheThread"); | |
645 ASSERT_TRUE(cache_thread.StartWithOptions( | |
646 base::Thread::Options(base::MessageLoop::TYPE_IO, 0))); | |
647 | |
648 disk_cache::BackendFlags flags = | |
649 fast ? disk_cache::kNone : disk_cache::kNoRandom; | |
650 CreateBackend(flags, &cache_thread); | |
651 | |
652 disk_cache::Entry* entry; | |
653 int rv = cache_->CreateEntry("some key", &entry, cb.callback()); | |
654 ASSERT_EQ(net::ERR_IO_PENDING, rv); | |
655 | |
656 cache_.reset(); | |
657 EXPECT_FALSE(cb.have_result()); | |
658 } | |
659 | |
660 base::MessageLoop::current()->RunUntilIdle(); | |
661 } | |
662 | |
663 TEST_F(DiskCacheBackendTest, ShutdownWithPendingCreate) { | |
664 BackendShutdownWithPendingCreate(false); | |
665 } | |
666 | |
667 #if !defined(LEAK_SANITIZER) | |
668 // We'll be leaking an entry from this test. | |
669 TEST_F(DiskCacheBackendTest, ShutdownWithPendingCreate_Fast) { | |
670 // The integrity test sets kNoRandom so there's a version mismatch if we don't | |
671 // force new eviction. | |
672 SetNewEviction(); | |
673 BackendShutdownWithPendingCreate(true); | |
674 } | |
675 #endif | |
676 | |
677 // Disabled on Android since this test requires the cache creator to create | |
678 // blockfile caches. | |
679 #if !defined(OS_ANDROID) | |
680 TEST_F(DiskCacheTest, TruncatedIndex) { | |
681 ASSERT_TRUE(CleanupCacheDir()); | |
682 base::FilePath index = cache_path_.AppendASCII("index"); | |
683 ASSERT_EQ(5, base::WriteFile(index, "hello", 5)); | |
684 | |
685 base::Thread cache_thread("CacheThread"); | |
686 ASSERT_TRUE(cache_thread.StartWithOptions( | |
687 base::Thread::Options(base::MessageLoop::TYPE_IO, 0))); | |
688 net::TestCompletionCallback cb; | |
689 | |
690 scoped_ptr<disk_cache::Backend> backend; | |
691 int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE, | |
692 net::CACHE_BACKEND_BLOCKFILE, | |
693 cache_path_, | |
694 0, | |
695 false, | |
696 cache_thread.task_runner(), | |
697 NULL, | |
698 &backend, | |
699 cb.callback()); | |
700 ASSERT_NE(net::OK, cb.GetResult(rv)); | |
701 | |
702 ASSERT_FALSE(backend); | |
703 } | |
704 #endif | |
705 | |
706 void DiskCacheBackendTest::BackendSetSize() { | |
707 const int cache_size = 0x10000; // 64 kB | |
708 SetMaxSize(cache_size); | |
709 InitCache(); | |
710 | |
711 std::string first("some key"); | |
712 std::string second("something else"); | |
713 disk_cache::Entry* entry; | |
714 ASSERT_EQ(net::OK, CreateEntry(first, &entry)); | |
715 | |
716 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(cache_size)); | |
717 memset(buffer->data(), 0, cache_size); | |
718 EXPECT_EQ(cache_size / 10, | |
719 WriteData(entry, 0, 0, buffer.get(), cache_size / 10, false)) | |
720 << "normal file"; | |
721 | |
722 EXPECT_EQ(net::ERR_FAILED, | |
723 WriteData(entry, 1, 0, buffer.get(), cache_size / 5, false)) | |
724 << "file size above the limit"; | |
725 | |
726 // By doubling the total size, we make this file cacheable. | |
727 SetMaxSize(cache_size * 2); | |
728 EXPECT_EQ(cache_size / 5, | |
729 WriteData(entry, 1, 0, buffer.get(), cache_size / 5, false)); | |
730 | |
731 // Let's fill up the cache! | |
732 SetMaxSize(cache_size * 10); | |
733 EXPECT_EQ(cache_size * 3 / 4, | |
734 WriteData(entry, 0, 0, buffer.get(), cache_size * 3 / 4, false)); | |
735 entry->Close(); | |
736 FlushQueueForTest(); | |
737 | |
738 SetMaxSize(cache_size); | |
739 | |
740 // The cache is 95% full. | |
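// (Stream 0 holds 3/4 of cache_size and stream 1 holds 1/5 of cache_size, | |
// measured against the original limit that was just restored.) | |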
741 | |
742 ASSERT_EQ(net::OK, CreateEntry(second, &entry)); | |
743 EXPECT_EQ(cache_size / 10, | |
744 WriteData(entry, 0, 0, buffer.get(), cache_size / 10, false)); | |
745 | |
746 disk_cache::Entry* entry2; | |
747 ASSERT_EQ(net::OK, CreateEntry("an extra key", &entry2)); | |
748 EXPECT_EQ(cache_size / 10, | |
749 WriteData(entry2, 0, 0, buffer.get(), cache_size / 10, false)); | |
750 entry2->Close(); // This will trigger the cache trim. | |
751 | |
752 EXPECT_NE(net::OK, OpenEntry(first, &entry2)); | |
753 | |
754 FlushQueueForTest(); // Make sure that we are done trimming the cache. | |
755 FlushQueueForTest(); // We may have posted two tasks to evict stuff. | |
756 | |
757 entry->Close(); | |
758 ASSERT_EQ(net::OK, OpenEntry(second, &entry)); | |
759 EXPECT_EQ(cache_size / 10, entry->GetDataSize(0)); | |
760 entry->Close(); | |
761 } | |
762 | |
763 TEST_F(DiskCacheBackendTest, SetSize) { | |
764 BackendSetSize(); | |
765 } | |
766 | |
767 TEST_F(DiskCacheBackendTest, NewEvictionSetSize) { | |
768 SetNewEviction(); | |
769 BackendSetSize(); | |
770 } | |
771 | |
772 TEST_F(DiskCacheBackendTest, MemoryOnlySetSize) { | |
773 SetMemoryOnlyMode(); | |
774 BackendSetSize(); | |
775 } | |
776 | |
777 void DiskCacheBackendTest::BackendLoad() { | |
778 InitCache(); | |
779 int seed = static_cast<int>(Time::Now().ToInternalValue()); | |
780 srand(seed); | |
781 | |
782 disk_cache::Entry* entries[100]; | |
783 for (int i = 0; i < 100; i++) { | |
784 std::string key = GenerateKey(true); | |
785 ASSERT_EQ(net::OK, CreateEntry(key, &entries[i])); | |
786 } | |
787 EXPECT_EQ(100, cache_->GetEntryCount()); | |
788 | |
789 for (int i = 0; i < 100; i++) { | |
790 int source1 = rand() % 100; | |
791 int source2 = rand() % 100; | |
792 disk_cache::Entry* temp = entries[source1]; | |
793 entries[source1] = entries[source2]; | |
794 entries[source2] = temp; | |
795 } | |
796 | |
797 for (int i = 0; i < 100; i++) { | |
798 disk_cache::Entry* entry; | |
799 ASSERT_EQ(net::OK, OpenEntry(entries[i]->GetKey(), &entry)); | |
800 EXPECT_TRUE(entry == entries[i]); | |
801 entry->Close(); | |
802 entries[i]->Doom(); | |
803 entries[i]->Close(); | |
804 } | |
805 FlushQueueForTest(); | |
806 EXPECT_EQ(0, cache_->GetEntryCount()); | |
807 } | |
808 | |
809 TEST_F(DiskCacheBackendTest, Load) { | |
810 // Work with a tiny index table (16 entries) | |
811 SetMask(0xf); | |
812 SetMaxSize(0x100000); | |
813 BackendLoad(); | |
814 } | |
815 | |
816 TEST_F(DiskCacheBackendTest, NewEvictionLoad) { | |
817 SetNewEviction(); | |
818 // Work with a tiny index table (16 entries) | |
819 SetMask(0xf); | |
820 SetMaxSize(0x100000); | |
821 BackendLoad(); | |
822 } | |
823 | |
824 TEST_F(DiskCacheBackendTest, MemoryOnlyLoad) { | |
825 SetMaxSize(0x100000); | |
826 SetMemoryOnlyMode(); | |
827 BackendLoad(); | |
828 } | |
829 | |
830 TEST_F(DiskCacheBackendTest, AppCacheLoad) { | |
831 SetCacheType(net::APP_CACHE); | |
832 // Work with a tiny index table (16 entries) | |
833 SetMask(0xf); | |
834 SetMaxSize(0x100000); | |
835 BackendLoad(); | |
836 } | |
837 | |
838 TEST_F(DiskCacheBackendTest, ShaderCacheLoad) { | |
839 SetCacheType(net::SHADER_CACHE); | |
840 // Work with a tiny index table (16 entries) | |
841 SetMask(0xf); | |
842 SetMaxSize(0x100000); | |
843 BackendLoad(); | |
844 } | |
845 | |
846 // Tests the chaining of an entry to the current head. | |
847 void DiskCacheBackendTest::BackendChain() { | |
848 SetMask(0x1); // 2-entry table. | |
849 SetMaxSize(0x3000); // 12 kB. | |
850 InitCache(); | |
851 | |
852 disk_cache::Entry* entry; | |
853 ASSERT_EQ(net::OK, CreateEntry("The first key", &entry)); | |
854 entry->Close(); | |
855 ASSERT_EQ(net::OK, CreateEntry("The Second key", &entry)); | |
856 entry->Close(); | |
857 } | |
858 | |
859 TEST_F(DiskCacheBackendTest, Chain) { | |
860 BackendChain(); | |
861 } | |
862 | |
863 TEST_F(DiskCacheBackendTest, NewEvictionChain) { | |
864 SetNewEviction(); | |
865 BackendChain(); | |
866 } | |
867 | |
868 TEST_F(DiskCacheBackendTest, AppCacheChain) { | |
869 SetCacheType(net::APP_CACHE); | |
870 BackendChain(); | |
871 } | |
872 | |
873 TEST_F(DiskCacheBackendTest, ShaderCacheChain) { | |
874 SetCacheType(net::SHADER_CACHE); | |
875 BackendChain(); | |
876 } | |
877 | |
878 TEST_F(DiskCacheBackendTest, NewEvictionTrim) { | |
879 SetNewEviction(); | |
880 InitCache(); | |
881 | |
882 disk_cache::Entry* entry; | |
883 for (int i = 0; i < 100; i++) { | |
884 std::string name(base::StringPrintf("Key %d", i)); | |
885 ASSERT_EQ(net::OK, CreateEntry(name, &entry)); | |
886 entry->Close(); | |
887 if (i < 90) { | |
888 // Entries 0 to 89 are in list 1; 90 to 99 are in list 0. | |
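// (Reopening an entry is what promotes it out of list 0 under the new | |
// eviction policy.) | |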
889 ASSERT_EQ(net::OK, OpenEntry(name, &entry)); | |
890 entry->Close(); | |
891 } | |
892 } | |
893 | |
894 // The first eviction must come from list 1 (10% limit), the second must come | |
895 // from list 0. | |
896 TrimForTest(false); | |
897 EXPECT_NE(net::OK, OpenEntry("Key 0", &entry)); | |
898 TrimForTest(false); | |
899 EXPECT_NE(net::OK, OpenEntry("Key 90", &entry)); | |
900 | |
901 // Double check that we still have the list tails. | |
902 ASSERT_EQ(net::OK, OpenEntry("Key 1", &entry)); | |
903 entry->Close(); | |
904 ASSERT_EQ(net::OK, OpenEntry("Key 91", &entry)); | |
905 entry->Close(); | |
906 } | |
907 | |
908 // Before looking for invalid entries, let's check a valid entry. | |
909 void DiskCacheBackendTest::BackendValidEntry() { | |
910 InitCache(); | |
911 | |
912 std::string key("Some key"); | |
913 disk_cache::Entry* entry; | |
914 ASSERT_EQ(net::OK, CreateEntry(key, &entry)); | |
915 | |
916 const int kSize = 50; | |
917 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize)); | |
918 memset(buffer1->data(), 0, kSize); | |
919 base::strlcpy(buffer1->data(), "And the data to save", kSize); | |
920 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer1.get(), kSize, false)); | |
921 entry->Close(); | |
922 SimulateCrash(); | |
923 | |
924 ASSERT_EQ(net::OK, OpenEntry(key, &entry)); | |
925 | |
926 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize)); | |
927 memset(buffer2->data(), 0, kSize); | |
928 EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer2.get(), kSize)); | |
929 entry->Close(); | |
930 EXPECT_STREQ(buffer1->data(), buffer2->data()); | |
931 } | |
932 | |
933 TEST_F(DiskCacheBackendTest, ValidEntry) { | |
934 BackendValidEntry(); | |
935 } | |
936 | |
937 TEST_F(DiskCacheBackendTest, NewEvictionValidEntry) { | |
938 SetNewEviction(); | |
939 BackendValidEntry(); | |
940 } | |
941 | |
942 // The same logic as the previous test (ValidEntry), but this time force the | |
943 // entry to be invalid, simulating a crash in the middle. | |
944 // We'll be leaking memory from this test. | |
945 void DiskCacheBackendTest::BackendInvalidEntry() { | |
946 InitCache(); | |
947 | |
948 std::string key("Some key"); | |
949 disk_cache::Entry* entry; | |
950 ASSERT_EQ(net::OK, CreateEntry(key, &entry)); | |
951 | |
952 const int kSize = 50; | |
953 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize)); | |
954 memset(buffer->data(), 0, kSize); | |
955 base::strlcpy(buffer->data(), "And the data to save", kSize); | |
956 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false)); | |
957 SimulateCrash(); | |
958 | |
959 EXPECT_NE(net::OK, OpenEntry(key, &entry)); | |
960 EXPECT_EQ(0, cache_->GetEntryCount()); | |
961 } | |
962 | |
963 #if !defined(LEAK_SANITIZER) | |
964 // We'll be leaking memory from this test. | |
965 TEST_F(DiskCacheBackendTest, InvalidEntry) { | |
966 BackendInvalidEntry(); | |
967 } | |
968 | |
969 // We'll be leaking memory from this test. | |
970 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry) { | |
971 SetNewEviction(); | |
972 BackendInvalidEntry(); | |
973 } | |
974 | |
975 // We'll be leaking memory from this test. | |
976 TEST_F(DiskCacheBackendTest, AppCacheInvalidEntry) { | |
977 SetCacheType(net::APP_CACHE); | |
978 BackendInvalidEntry(); | |
979 } | |
980 | |
981 // We'll be leaking memory from this test. | |
982 TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntry) { | |
983 SetCacheType(net::SHADER_CACHE); | |
984 BackendInvalidEntry(); | |
985 } | |
986 | |
987 // Almost the same test, but this time crash the cache after reading an entry. | |
988 // We'll be leaking memory from this test. | |
989 void DiskCacheBackendTest::BackendInvalidEntryRead() { | |
990 InitCache(); | |
991 | |
992 std::string key("Some key"); | |
993 disk_cache::Entry* entry; | |
994 ASSERT_EQ(net::OK, CreateEntry(key, &entry)); | |
995 | |
996 const int kSize = 50; | |
997 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize)); | |
998 memset(buffer->data(), 0, kSize); | |
999 base::strlcpy(buffer->data(), "And the data to save", kSize); | |
1000 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false)); | |
1001 entry->Close(); | |
1002 ASSERT_EQ(net::OK, OpenEntry(key, &entry)); | |
1003 EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer.get(), kSize)); | |
1004 | |
1005 SimulateCrash(); | |
1006 | |
1007 if (type_ == net::APP_CACHE) { | |
1008 // Reading an entry and crashing should not make it dirty. | |
1009 ASSERT_EQ(net::OK, OpenEntry(key, &entry)); | |
1010 EXPECT_EQ(1, cache_->GetEntryCount()); | |
1011 entry->Close(); | |
1012 } else { | |
1013 EXPECT_NE(net::OK, OpenEntry(key, &entry)); | |
1014 EXPECT_EQ(0, cache_->GetEntryCount()); | |
1015 } | |
1016 } | |
1017 | |
1018 // We'll be leaking memory from this test. | |
1019 TEST_F(DiskCacheBackendTest, InvalidEntryRead) { | |
1020 BackendInvalidEntryRead(); | |
1021 } | |
1022 | |
1023 // We'll be leaking memory from this test. | |
1024 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryRead) { | |
1025 SetNewEviction(); | |
1026 BackendInvalidEntryRead(); | |
1027 } | |
1028 | |
1029 // We'll be leaking memory from this test. | |
1030 TEST_F(DiskCacheBackendTest, AppCacheInvalidEntryRead) { | |
1031 SetCacheType(net::APP_CACHE); | |
1032 BackendInvalidEntryRead(); | |
1033 } | |
1034 | |
1035 // We'll be leaking memory from this test. | |
1036 TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntryRead) { | |
1037 SetCacheType(net::SHADER_CACHE); | |
1038 BackendInvalidEntryRead(); | |
1039 } | |
1040 | |
1041 // We'll be leaking memory from this test. | |
1042 void DiskCacheBackendTest::BackendInvalidEntryWithLoad() { | |
1043 // Work with a tiny index table (16 entries) | |
1044 SetMask(0xf); | |
1045 SetMaxSize(0x100000); | |
1046 InitCache(); | |
1047 | |
1048 int seed = static_cast<int>(Time::Now().ToInternalValue()); | |
1049 srand(seed); | |
1050 | |
1051 const int kNumEntries = 100; | |
1052 disk_cache::Entry* entries[kNumEntries]; | |
1053 for (int i = 0; i < kNumEntries; i++) { | |
1054 std::string key = GenerateKey(true); | |
1055 ASSERT_EQ(net::OK, CreateEntry(key, &entries[i])); | |
1056 } | |
1057 EXPECT_EQ(kNumEntries, cache_->GetEntryCount()); | |
1058 | |
1059 for (int i = 0; i < kNumEntries; i++) { | |
1060 int source1 = rand() % kNumEntries; | |
1061 int source2 = rand() % kNumEntries; | |
1062 disk_cache::Entry* temp = entries[source1]; | |
1063 entries[source1] = entries[source2]; | |
1064 entries[source2] = temp; | |
1065 } | |
1066 | |
1067 std::string keys[kNumEntries]; | |
1068 for (int i = 0; i < kNumEntries; i++) { | |
1069 keys[i] = entries[i]->GetKey(); | |
1070 if (i < kNumEntries / 2) | |
1071 entries[i]->Close(); | |
1072 } | |
1073 | |
1074 SimulateCrash(); | |
1075 | |
1076 for (int i = kNumEntries / 2; i < kNumEntries; i++) { | |
1077 disk_cache::Entry* entry; | |
1078 EXPECT_NE(net::OK, OpenEntry(keys[i], &entry)); | |
1079 } | |
1080 | |
1081 for (int i = 0; i < kNumEntries / 2; i++) { | |
1082 disk_cache::Entry* entry; | |
1083 ASSERT_EQ(net::OK, OpenEntry(keys[i], &entry)); | |
1084 entry->Close(); | |
1085 } | |
1086 | |
1087 EXPECT_EQ(kNumEntries / 2, cache_->GetEntryCount()); | |
1088 } | |
1089 | |
1090 // We'll be leaking memory from this test. | |
1091 TEST_F(DiskCacheBackendTest, InvalidEntryWithLoad) { | |
1092 BackendInvalidEntryWithLoad(); | |
1093 } | |
1094 | |
1095 // We'll be leaking memory from this test. | |
1096 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryWithLoad) { | |
1097 SetNewEviction(); | |
1098 BackendInvalidEntryWithLoad(); | |
1099 } | |
1100 | |
1101 // We'll be leaking memory from this test. | |
1102 TEST_F(DiskCacheBackendTest, AppCacheInvalidEntryWithLoad) { | |
1103 SetCacheType(net::APP_CACHE); | |
1104 BackendInvalidEntryWithLoad(); | |
1105 } | |
1106 | |
1107 // We'll be leaking memory from this test. | |
1108 TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntryWithLoad) { | |
1109 SetCacheType(net::SHADER_CACHE); | |
1110 BackendInvalidEntryWithLoad(); | |
1111 } | |
1112 | |
1113 // We'll be leaking memory from this test. | |
1114 void DiskCacheBackendTest::BackendTrimInvalidEntry() { | |
1115 const int kSize = 0x3000; // 12 kB | |
1116 SetMaxSize(kSize * 10); | |
1117 InitCache(); | |
1118 | |
1119 std::string first("some key"); | |
1120 std::string second("something else"); | |
1121 disk_cache::Entry* entry; | |
1122 ASSERT_EQ(net::OK, CreateEntry(first, &entry)); | |
1123 | |
1124 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize)); | |
1125 memset(buffer->data(), 0, kSize); | |
1126 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false)); | |
1127 | |
1128 // Simulate a crash. | |
1129 SimulateCrash(); | |
1130 | |
1131 ASSERT_EQ(net::OK, CreateEntry(second, &entry)); | |
1132 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false)); | |
1133 | |
1134 EXPECT_EQ(2, cache_->GetEntryCount()); | |
1135 SetMaxSize(kSize); | |
1136 entry->Close(); // Trim the cache. | |
1137 FlushQueueForTest(); | |
1138 | |
1139 // If we evicted the entry in less than 20 ms, we have one entry in the cache; | |
1140 // if it took more than that, we posted a task and we'll delete the second | |
1141 // entry too. | |
1142 base::MessageLoop::current()->RunUntilIdle(); | |
1143 | |
1144 // This may not be thread-safe in general, but for now it's OK, so add some | |
1145 // ThreadSanitizer annotations to ignore data races on cache_. | |
1146 // See http://crbug.com/55970 | |
1147 ANNOTATE_IGNORE_READS_BEGIN(); | |
1148 EXPECT_GE(1, cache_->GetEntryCount()); | |
1149 ANNOTATE_IGNORE_READS_END(); | |
1150 | |
1151 EXPECT_NE(net::OK, OpenEntry(first, &entry)); | |
1152 } | |
1153 | |
1154 // We'll be leaking memory from this test. | |
1155 TEST_F(DiskCacheBackendTest, TrimInvalidEntry) { | |
1156 BackendTrimInvalidEntry(); | |
1157 } | |
1158 | |
1159 // We'll be leaking memory from this test. | |
1160 TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry) { | |
1161 SetNewEviction(); | |
1162 BackendTrimInvalidEntry(); | |
1163 } | |
1164 | |
1165 // We'll be leaking memory from this test. | |
1166 void DiskCacheBackendTest::BackendTrimInvalidEntry2() { | |
1167 SetMask(0xf); // 16-entry table. | |
1168 | |
1169 const int kSize = 0x3000; // 12 kB | |
1170 SetMaxSize(kSize * 40); | |
1171 InitCache(); | |
1172 | |
1173 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize)); | |
1174 memset(buffer->data(), 0, kSize); | |
1175 disk_cache::Entry* entry; | |
1176 | |
1177 // Writing 32 entries to this cache chains most of them. | |
1178 for (int i = 0; i < 32; i++) { | |
1179 std::string key(base::StringPrintf("some key %d", i)); | |
1180 ASSERT_EQ(net::OK, CreateEntry(key, &entry)); | |
1181 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false)); | |
1182 entry->Close(); | |
1183 ASSERT_EQ(net::OK, OpenEntry(key, &entry)); | |
1184 // Note that we are not closing the entries. | |
1185 } | |
1186 | |
1187 // Simulate a crash. | |
1188 SimulateCrash(); | |
1189 | |
1190 ASSERT_EQ(net::OK, CreateEntry("Something else", &entry)); | |
1191 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false)); | |
1192 | |
1193 FlushQueueForTest(); | |
1194 EXPECT_EQ(33, cache_->GetEntryCount()); | |
1195 SetMaxSize(kSize); | |
1196 | |
1197 // For the new eviction code, all corrupt entries are on the second list so | |
1198 // they are not going away that easily. | |
1199 if (new_eviction_) { | |
1200 EXPECT_EQ(net::OK, DoomAllEntries()); | |
1201 } | |
1202 | |
1203 entry->Close(); // Trim the cache. | |
1204 FlushQueueForTest(); | |
1205 | |
1206 // We may abort the eviction before cleaning up everything. | |
1207 base::MessageLoop::current()->RunUntilIdle(); | |
1208 FlushQueueForTest(); | |
1209 // To be clear: eviction tasks may still be running at this point, so the | |
1210 // number of entries can change while we read it. | |
1211 ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN(); | |
1212 EXPECT_GE(30, cache_->GetEntryCount()); | |
1213 ANNOTATE_IGNORE_READS_AND_WRITES_END(); | |
1214 } | |
1215 | |
1216 // We'll be leaking memory from this test. | |
1217 TEST_F(DiskCacheBackendTest, TrimInvalidEntry2) { | |
1218 BackendTrimInvalidEntry2(); | |
1219 } | |
1220 | |
1221 // We'll be leaking memory from this test. | |
1222 TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry2) { | |
1223 SetNewEviction(); | |
1224 BackendTrimInvalidEntry2(); | |
1225 } | |
1226 #endif // !defined(LEAK_SANITIZER) | |
1227 | |
1228 void DiskCacheBackendTest::BackendEnumerations() { | |
1229 InitCache(); | |
1230 Time initial = Time::Now(); | |
1231 | |
1232 const int kNumEntries = 100; | |
1233 for (int i = 0; i < kNumEntries; i++) { | |
1234 std::string key = GenerateKey(true); | |
1235 disk_cache::Entry* entry; | |
1236 ASSERT_EQ(net::OK, CreateEntry(key, &entry)); | |
1237 entry->Close(); | |
1238 } | |
1239 EXPECT_EQ(kNumEntries, cache_->GetEntryCount()); | |
1240 Time final = Time::Now(); | |
1241 | |
1242 disk_cache::Entry* entry; | |
1243 scoped_ptr<TestIterator> iter = CreateIterator(); | |
1244 int count = 0; | |
1245 Time last_modified[kNumEntries]; | |
1246 Time last_used[kNumEntries]; | |
1247 while (iter->OpenNextEntry(&entry) == net::OK) { | |
1248 ASSERT_TRUE(NULL != entry); | |
1249 if (count < kNumEntries) { | |
1250 last_modified[count] = entry->GetLastModified(); | |
1251 last_used[count] = entry->GetLastUsed(); | |
1252 EXPECT_TRUE(initial <= last_modified[count]); | |
1253 EXPECT_TRUE(final >= last_modified[count]); | |
1254 } | |
1255 | |
1256 entry->Close(); | |
1257 count++; | |
1258 } | |
1259 EXPECT_EQ(kNumEntries, count); | |
1260 | |
1261 iter = CreateIterator(); | |
1262 count = 0; | |
1263 // The previous enumeration should not have changed the timestamps. | |
1264 while (iter->OpenNextEntry(&entry) == net::OK) { | |
1265 ASSERT_TRUE(NULL != entry); | |
1266 if (count < kNumEntries) { | |
1267 EXPECT_TRUE(last_modified[count] == entry->GetLastModified()); | |
1268 EXPECT_TRUE(last_used[count] == entry->GetLastUsed()); | |
1269 } | |
1270 entry->Close(); | |
1271 count++; | |
1272 } | |
1273 EXPECT_EQ(kNumEntries, count); | |
1274 } | |
1275 | |
1276 TEST_F(DiskCacheBackendTest, Enumerations) { | |
1277 BackendEnumerations(); | |
1278 } | |
1279 | |
1280 TEST_F(DiskCacheBackendTest, NewEvictionEnumerations) { | |
1281 SetNewEviction(); | |
1282 BackendEnumerations(); | |
1283 } | |
1284 | |
1285 TEST_F(DiskCacheBackendTest, MemoryOnlyEnumerations) { | |
1286 SetMemoryOnlyMode(); | |
1287 BackendEnumerations(); | |
1288 } | |
1289 | |
1290 TEST_F(DiskCacheBackendTest, ShaderCacheEnumerations) { | |
1291 SetCacheType(net::SHADER_CACHE); | |
1292 BackendEnumerations(); | |
1293 } | |
1294 | |
1295 TEST_F(DiskCacheBackendTest, AppCacheEnumerations) { | |
1296 SetCacheType(net::APP_CACHE); | |
1297 BackendEnumerations(); | |
1298 } | |
1299 | |
1300 // Verifies enumerations while entries are open. | |
1301 void DiskCacheBackendTest::BackendEnumerations2() { | |
1302 InitCache(); | |
1303 const std::string first("first"); | |
1304 const std::string second("second"); | |
1305 disk_cache::Entry *entry1, *entry2; | |
1306 ASSERT_EQ(net::OK, CreateEntry(first, &entry1)); | |
1307 entry1->Close(); | |
1308 ASSERT_EQ(net::OK, CreateEntry(second, &entry2)); | |
1309 entry2->Close(); | |
1310 FlushQueueForTest(); | |
1311 | |
1312 // Make sure that the timestamp is not the same. | |
1313 AddDelay(); | |
1314 ASSERT_EQ(net::OK, OpenEntry(second, &entry1)); | |
1315 scoped_ptr<TestIterator> iter = CreateIterator(); | |
1316 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry2)); | |
1317 EXPECT_EQ(entry2->GetKey(), second); | |
1318 | |
1319 // Two entries and the iterator pointing at "first". | |
1320 entry1->Close(); | |
1321 entry2->Close(); | |
1322 | |
1323 // The iterator should still be valid, so we should not crash. | |
1324 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry2)); | |
1325 EXPECT_EQ(entry2->GetKey(), first); | |
1326 entry2->Close(); | |
1327 iter = CreateIterator(); | |
1328 | |
1329 // Modify the oldest entry and get the newest element. | |
1330 ASSERT_EQ(net::OK, OpenEntry(first, &entry1)); | |
1331 EXPECT_EQ(0, WriteData(entry1, 0, 200, NULL, 0, false)); | |
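// A zero-length write is enough to touch the entry and, except for | |
// APP_CACHE (checked below), move it to the front of the list. | |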
1332 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry2)); | |
1333 if (type_ == net::APP_CACHE) { | |
1334 // The list is not updated. | |
1335 EXPECT_EQ(entry2->GetKey(), second); | |
1336 } else { | |
1337 EXPECT_EQ(entry2->GetKey(), first); | |
1338 } | |
1339 | |
1340 entry1->Close(); | |
1341 entry2->Close(); | |
1342 } | |
1343 | |
1344 TEST_F(DiskCacheBackendTest, Enumerations2) { | |
1345 BackendEnumerations2(); | |
1346 } | |
1347 | |
1348 TEST_F(DiskCacheBackendTest, NewEvictionEnumerations2) { | |
1349 SetNewEviction(); | |
1350 BackendEnumerations2(); | |
1351 } | |
1352 | |
1353 TEST_F(DiskCacheBackendTest, MemoryOnlyEnumerations2) { | |
1354 SetMemoryOnlyMode(); | |
1355 BackendEnumerations2(); | |
1356 } | |
1357 | |
1358 TEST_F(DiskCacheBackendTest, AppCacheEnumerations2) { | |
1359 SetCacheType(net::APP_CACHE); | |
1360 BackendEnumerations2(); | |
1361 } | |
1362 | |
1363 TEST_F(DiskCacheBackendTest, ShaderCacheEnumerations2) { | |
1364 SetCacheType(net::SHADER_CACHE); | |
1365 BackendEnumerations2(); | |
1366 } | |
1367 | |
1368 // Verify that ReadData calls do not update the LRU cache | |
1369 // when using the SHADER_CACHE type. | |
1370 TEST_F(DiskCacheBackendTest, ShaderCacheEnumerationReadData) { | |
1371 SetCacheType(net::SHADER_CACHE); | |
1372 InitCache(); | |
1373 const std::string first("first"); | |
1374 const std::string second("second"); | |
1375 disk_cache::Entry *entry1, *entry2; | |
1376 const int kSize = 50; | |
1377 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize)); | |
1378 | |
1379 ASSERT_EQ(net::OK, CreateEntry(first, &entry1)); | |
1380 memset(buffer1->data(), 0, kSize); | |
1381 base::strlcpy(buffer1->data(), "And the data to save", kSize); | |
1382 EXPECT_EQ(kSize, WriteData(entry1, 0, 0, buffer1.get(), kSize, false)); | |
1383 | |
1384 ASSERT_EQ(net::OK, CreateEntry(second, &entry2)); | |
1385 entry2->Close(); | |
1386 | |
1387 FlushQueueForTest(); | |
1388 | |
1389 // Make sure that the timestamp is not the same. | |
1390 AddDelay(); | |
1391 | |
1392 // Read from the last item in the LRU. | |
1393 EXPECT_EQ(kSize, ReadData(entry1, 0, 0, buffer1.get(), kSize)); | |
1394 entry1->Close(); | |
1395 | |
1396 scoped_ptr<TestIterator> iter = CreateIterator(); | |
1397 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry2)); | |
1398 EXPECT_EQ(entry2->GetKey(), second); | |
1399 entry2->Close(); | |
1400 } | |
1401 | |
1402 #if !defined(LEAK_SANITIZER) | |
1403 // Verify handling of invalid entries while doing enumerations. | |
1404 // We'll be leaking memory from this test. | |
1405 void DiskCacheBackendTest::BackendInvalidEntryEnumeration() { | |
1406 InitCache(); | |
1407 | |
1408 std::string key("Some key"); | |
1409 disk_cache::Entry *entry, *entry1, *entry2; | |
1410 ASSERT_EQ(net::OK, CreateEntry(key, &entry1)); | |
1411 | |
1412 const int kSize = 50; | |
1413 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize)); | |
1414 memset(buffer1->data(), 0, kSize); | |
1415 base::strlcpy(buffer1->data(), "And the data to save", kSize); | |
1416 EXPECT_EQ(kSize, WriteData(entry1, 0, 0, buffer1.get(), kSize, false)); | |
1417 entry1->Close(); | |
1418 ASSERT_EQ(net::OK, OpenEntry(key, &entry1)); | |
1419 EXPECT_EQ(kSize, ReadData(entry1, 0, 0, buffer1.get(), kSize)); | |
1420 | |
1421 std::string key2("Another key"); | |
1422 ASSERT_EQ(net::OK, CreateEntry(key2, &entry2)); | |
1423 entry2->Close(); | |
1424 ASSERT_EQ(2, cache_->GetEntryCount()); | |
1425 | |
1426 SimulateCrash(); | |
1427 | |
1428 scoped_ptr<TestIterator> iter = CreateIterator(); | |
1429 int count = 0; | |
1430 while (iter->OpenNextEntry(&entry) == net::OK) { | |
1431 ASSERT_TRUE(NULL != entry); | |
1432 EXPECT_EQ(key2, entry->GetKey()); | |
1433 entry->Close(); | |
1434 count++; | |
1435 } | |
1436 EXPECT_EQ(1, count); | |
1437 EXPECT_EQ(1, cache_->GetEntryCount()); | |
1438 } | |
1439 | |
1440 // We'll be leaking memory from this test. | |
1441 TEST_F(DiskCacheBackendTest, InvalidEntryEnumeration) { | |
1442 BackendInvalidEntryEnumeration(); | |
1443 } | |
1444 | |
1445 // We'll be leaking memory from this test. | |
1446 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryEnumeration) { | |
1447 SetNewEviction(); | |
1448 BackendInvalidEntryEnumeration(); | |
1449 } | |
1450 #endif // !defined(LEAK_SANITIZER) | |
1451 | |
1452 // Tests that if for some reason entries are modified close to existing cache | |
1453 // iterators, we don't generate fatal errors or reset the cache. | |
1454 void DiskCacheBackendTest::BackendFixEnumerators() { | |
1455 InitCache(); | |
1456 | |
1457 int seed = static_cast<int>(Time::Now().ToInternalValue()); | |
1458 srand(seed); | |
1459 | |
1460 const int kNumEntries = 10; | |
1461 for (int i = 0; i < kNumEntries; i++) { | |
1462 std::string key = GenerateKey(true); | |
1463 disk_cache::Entry* entry; | |
1464 ASSERT_EQ(net::OK, CreateEntry(key, &entry)); | |
1465 entry->Close(); | |
1466 } | |
1467 EXPECT_EQ(kNumEntries, cache_->GetEntryCount()); | |
1468 | |
1469 disk_cache::Entry *entry1, *entry2; | |
1470 scoped_ptr<TestIterator> iter1 = CreateIterator(), iter2 = CreateIterator(); | |
1471 ASSERT_EQ(net::OK, iter1->OpenNextEntry(&entry1)); | |
1472 ASSERT_TRUE(NULL != entry1); | |
1473 entry1->Close(); | |
1474 entry1 = NULL; | |
1475 | |
1476 // Let's go to the middle of the list. | |
1477 for (int i = 0; i < kNumEntries / 2; i++) { | |
1478 if (entry1) | |
1479 entry1->Close(); | |
1480 ASSERT_EQ(net::OK, iter1->OpenNextEntry(&entry1)); | |
1481 ASSERT_TRUE(NULL != entry1); | |
1482 | |
1483 ASSERT_EQ(net::OK, iter2->OpenNextEntry(&entry2)); | |
1484 ASSERT_TRUE(NULL != entry2); | |
1485 entry2->Close(); | |
1486 } | |
1487 | |
1488 // Messing with entry1 will modify entry2->next. | |
1489 entry1->Doom(); | |
1490 ASSERT_EQ(net::OK, iter2->OpenNextEntry(&entry2)); | |
1491 ASSERT_TRUE(NULL != entry2); | |
1492 | |
1493 // The link entry2->entry1 should be broken. | |
1494 EXPECT_NE(entry2->GetKey(), entry1->GetKey()); | |
1495 entry1->Close(); | |
1496 entry2->Close(); | |
1497 | |
1498 // And the second iterator should keep working. | |
1499 ASSERT_EQ(net::OK, iter2->OpenNextEntry(&entry2)); | |
1500 ASSERT_TRUE(NULL != entry2); | |
1501 entry2->Close(); | |
1502 } | |
1503 | |
1504 TEST_F(DiskCacheBackendTest, FixEnumerators) { | |
1505 BackendFixEnumerators(); | |
1506 } | |
1507 | |
1508 TEST_F(DiskCacheBackendTest, NewEvictionFixEnumerators) { | |
1509 SetNewEviction(); | |
1510 BackendFixEnumerators(); | |
1511 } | |
1512 | |
1513 void DiskCacheBackendTest::BackendDoomRecent() { | |
1514 InitCache(); | |
1515 | |
1516 disk_cache::Entry *entry; | |
1517 ASSERT_EQ(net::OK, CreateEntry("first", &entry)); | |
1518 entry->Close(); | |
1519 ASSERT_EQ(net::OK, CreateEntry("second", &entry)); | |
1520 entry->Close(); | |
1521 FlushQueueForTest(); | |
1522 | |
1523 AddDelay(); | |
1524 Time middle = Time::Now(); | |
1525 | |
1526 ASSERT_EQ(net::OK, CreateEntry("third", &entry)); | |
1527 entry->Close(); | |
1528 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry)); | |
1529 entry->Close(); | |
1530 FlushQueueForTest(); | |
1531 | |
1532 AddDelay(); | |
1533 Time final = Time::Now(); | |
1534 | |
1535 ASSERT_EQ(4, cache_->GetEntryCount()); | |
1536 EXPECT_EQ(net::OK, DoomEntriesSince(final)); | |
1537 ASSERT_EQ(4, cache_->GetEntryCount()); | |
1538 | |
1539 EXPECT_EQ(net::OK, DoomEntriesSince(middle)); | |
1540 ASSERT_EQ(2, cache_->GetEntryCount()); | |
1541 | |
1542 ASSERT_EQ(net::OK, OpenEntry("second", &entry)); | |
1543 entry->Close(); | |
1544 } | |
1545 | |
1546 TEST_F(DiskCacheBackendTest, DoomRecent) { | |
1547 BackendDoomRecent(); | |
1548 } | |
1549 | |
1550 TEST_F(DiskCacheBackendTest, NewEvictionDoomRecent) { | |
1551 SetNewEviction(); | |
1552 BackendDoomRecent(); | |
1553 } | |
1554 | |
1555 TEST_F(DiskCacheBackendTest, MemoryOnlyDoomRecent) { | |
1556 SetMemoryOnlyMode(); | |
1557 BackendDoomRecent(); | |
1558 } | |
1559 | |
1560 TEST_F(DiskCacheBackendTest, MemoryOnlyDoomEntriesSinceSparse) { | |
1561 SetMemoryOnlyMode(); | |
1562 base::Time start; | |
1563 InitSparseCache(&start, NULL); | |
1564 DoomEntriesSince(start); | |
1565 EXPECT_EQ(1, cache_->GetEntryCount()); | |
1566 } | |
1567 | |
1568 TEST_F(DiskCacheBackendTest, DoomEntriesSinceSparse) { | |
1569 base::Time start; | |
1570 InitSparseCache(&start, NULL); | |
1571 DoomEntriesSince(start); | |
1572 // NOTE: BackendImpl counts child entries in its GetEntryCount(), while | |
1573 // MemBackendImpl does not. That's why the expected value differs here from | |
1574 // MemoryOnlyDoomEntriesSinceSparse. | |
1575 EXPECT_EQ(3, cache_->GetEntryCount()); | |
1576 } | |
1577 | |
1578 TEST_F(DiskCacheBackendTest, MemoryOnlyDoomAllSparse) { | |
1579 SetMemoryOnlyMode(); | |
1580 InitSparseCache(NULL, NULL); | |
1581 EXPECT_EQ(net::OK, DoomAllEntries()); | |
1582 EXPECT_EQ(0, cache_->GetEntryCount()); | |
1583 } | |
1584 | |
1585 TEST_F(DiskCacheBackendTest, DoomAllSparse) { | |
1586 InitSparseCache(NULL, NULL); | |
1587 EXPECT_EQ(net::OK, DoomAllEntries()); | |
1588 EXPECT_EQ(0, cache_->GetEntryCount()); | |
1589 } | |
1590 | |
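// Creates entries in three separate time windows and checks that
// DoomEntriesBetween() removes only the entries that fall inside the range.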
1591 void DiskCacheBackendTest::BackendDoomBetween() { | |
1592 InitCache(); | |
1593 | |
1594 disk_cache::Entry *entry; | |
1595 ASSERT_EQ(net::OK, CreateEntry("first", &entry)); | |
1596 entry->Close(); | |
1597 FlushQueueForTest(); | |
1598 | |
1599 AddDelay(); | |
1600 Time middle_start = Time::Now(); | |
1601 | |
1602 ASSERT_EQ(net::OK, CreateEntry("second", &entry)); | |
1603 entry->Close(); | |
1604 ASSERT_EQ(net::OK, CreateEntry("third", &entry)); | |
1605 entry->Close(); | |
1606 FlushQueueForTest(); | |
1607 | |
1608 AddDelay(); | |
1609 Time middle_end = Time::Now(); | |
1610 | |
1611 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry)); | |
1612 entry->Close(); | |
1613 ASSERT_EQ(net::OK, OpenEntry("fourth", &entry)); | |
1614 entry->Close(); | |
1615 FlushQueueForTest(); | |
1616 | |
1617 AddDelay(); | |
1618 Time final = Time::Now(); | |
1619 | |
1620 ASSERT_EQ(4, cache_->GetEntryCount()); | |
1621 EXPECT_EQ(net::OK, DoomEntriesBetween(middle_start, middle_end)); | |
1622 ASSERT_EQ(2, cache_->GetEntryCount()); | |
1623 | |
1624 ASSERT_EQ(net::OK, OpenEntry("fourth", &entry)); | |
1625 entry->Close(); | |
1626 | |
1627 EXPECT_EQ(net::OK, DoomEntriesBetween(middle_start, final)); | |
1628 ASSERT_EQ(1, cache_->GetEntryCount()); | |
1629 | |
1630 ASSERT_EQ(net::OK, OpenEntry("first", &entry)); | |
1631 entry->Close(); | |
1632 } | |
1633 | |
1634 TEST_F(DiskCacheBackendTest, DoomBetween) { | |
1635 BackendDoomBetween(); | |
1636 } | |
1637 | |
1638 TEST_F(DiskCacheBackendTest, NewEvictionDoomBetween) { | |
1639 SetNewEviction(); | |
1640 BackendDoomBetween(); | |
1641 } | |
1642 | |
1643 TEST_F(DiskCacheBackendTest, MemoryOnlyDoomBetween) { | |
1644 SetMemoryOnlyMode(); | |
1645 BackendDoomBetween(); | |
1646 } | |
1647 | |
1648 TEST_F(DiskCacheBackendTest, MemoryOnlyDoomEntriesBetweenSparse) { | |
1649 SetMemoryOnlyMode(); | |
1650 base::Time start, end; | |
1651 InitSparseCache(&start, &end); | |
1652 DoomEntriesBetween(start, end); | |
1653 EXPECT_EQ(3, cache_->GetEntryCount()); | |
1654 | |
1655 start = end; | |
1656 end = base::Time::Now(); | |
1657 DoomEntriesBetween(start, end); | |
1658 EXPECT_EQ(1, cache_->GetEntryCount()); | |
1659 } | |
1660 | |
1661 TEST_F(DiskCacheBackendTest, DoomEntriesBetweenSparse) { | |
1662 base::Time start, end; | |
1663 InitSparseCache(&start, &end); | |
1664 DoomEntriesBetween(start, end); | |
1665 EXPECT_EQ(9, cache_->GetEntryCount()); | |
1666 | |
1667 start = end; | |
1668 end = base::Time::Now(); | |
1669 DoomEntriesBetween(start, end); | |
1670 EXPECT_EQ(3, cache_->GetEntryCount()); | |
1671 } | |
1672 | |
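// Loads the golden cache |name| (captured while an operation was in flight)
// and verifies that the interrupted transaction is rolled back: the affected
// entry cannot be opened and the cache still passes the integrity check.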
1673 void DiskCacheBackendTest::BackendTransaction(const std::string& name, | |
1674 int num_entries, bool load) { | |
1675 success_ = false; | |
1676 ASSERT_TRUE(CopyTestCache(name)); | |
1677 DisableFirstCleanup(); | |
1678 | |
1679 uint32 mask; | |
1680 if (load) { | |
1681 mask = 0xf; | |
1682 SetMaxSize(0x100000); | |
1683 } else { | |
1684 // Clear the settings from the previous run. | |
1685 mask = 0; | |
1686 SetMaxSize(0); | |
1687 } | |
1688 SetMask(mask); | |
1689 | |
1690 InitCache(); | |
1691 ASSERT_EQ(num_entries + 1, cache_->GetEntryCount()); | |
1692 | |
1693 std::string key("the first key"); | |
1694 disk_cache::Entry* entry1; | |
1695 ASSERT_NE(net::OK, OpenEntry(key, &entry1)); | |
1696 | |
1697 int actual = cache_->GetEntryCount(); | |
1698 if (num_entries != actual) { | |
1699 ASSERT_TRUE(load); | |
1700 // If there is a heavy load, inserting an entry will make another entry | |
1701 // dirty (on the hash bucket) so two entries are removed. | |
1702 ASSERT_EQ(num_entries - 1, actual); | |
1703 } | |
1704 | |
1705 cache_.reset(); | |
1706 cache_impl_ = NULL; | |
1707 | |
1708 ASSERT_TRUE(CheckCacheIntegrity(cache_path_, new_eviction_, mask)); | |
1709 success_ = true; | |
1710 } | |
1711 | |
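// Exercises recovery from caches that were interrupted while inserting an
// entry.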
1712 void DiskCacheBackendTest::BackendRecoverInsert() { | |
1713 // Tests with an empty cache. | |
1714 BackendTransaction("insert_empty1", 0, false); | |
1715 ASSERT_TRUE(success_) << "insert_empty1"; | |
1716 BackendTransaction("insert_empty2", 0, false); | |
1717 ASSERT_TRUE(success_) << "insert_empty2"; | |
1718 BackendTransaction("insert_empty3", 0, false); | |
1719 ASSERT_TRUE(success_) << "insert_empty3"; | |
1720 | |
1721 // Tests with one entry on the cache. | |
1722 BackendTransaction("insert_one1", 1, false); | |
1723 ASSERT_TRUE(success_) << "insert_one1"; | |
1724 BackendTransaction("insert_one2", 1, false); | |
1725 ASSERT_TRUE(success_) << "insert_one2"; | |
1726 BackendTransaction("insert_one3", 1, false); | |
1727 ASSERT_TRUE(success_) << "insert_one3"; | |
1728 | |
1729 // Tests with one hundred entries on the cache, tiny index. | |
1730 BackendTransaction("insert_load1", 100, true); | |
1731 ASSERT_TRUE(success_) << "insert_load1"; | |
1732 BackendTransaction("insert_load2", 100, true); | |
1733 ASSERT_TRUE(success_) << "insert_load2"; | |
1734 } | |
1735 | |
1736 TEST_F(DiskCacheBackendTest, RecoverInsert) { | |
1737 BackendRecoverInsert(); | |
1738 } | |
1739 | |
1740 TEST_F(DiskCacheBackendTest, NewEvictionRecoverInsert) { | |
1741 SetNewEviction(); | |
1742 BackendRecoverInsert(); | |
1743 } | |
1744 | |
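// Exercises recovery from caches that were interrupted while removing an
// entry.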
1745 void DiskCacheBackendTest::BackendRecoverRemove() { | |
1746 // Removing the only element. | |
1747 BackendTransaction("remove_one1", 0, false); | |
1748 ASSERT_TRUE(success_) << "remove_one1"; | |
1749 BackendTransaction("remove_one2", 0, false); | |
1750 ASSERT_TRUE(success_) << "remove_one2"; | |
1751 BackendTransaction("remove_one3", 0, false); | |
1752 ASSERT_TRUE(success_) << "remove_one3"; | |
1753 | |
1754 // Removing the head. | |
1755 BackendTransaction("remove_head1", 1, false); | |
1756 ASSERT_TRUE(success_) << "remove_head1"; | |
1757 BackendTransaction("remove_head2", 1, false); | |
1758 ASSERT_TRUE(success_) << "remove_head2"; | |
1759 BackendTransaction("remove_head3", 1, false); | |
1760 ASSERT_TRUE(success_) << "remove_head3"; | |
1761 | |
1762 // Removing the tail. | |
1763 BackendTransaction("remove_tail1", 1, false); | |
1764 ASSERT_TRUE(success_) << "remove_tail1"; | |
1765 BackendTransaction("remove_tail2", 1, false); | |
1766 ASSERT_TRUE(success_) << "remove_tail2"; | |
1767 BackendTransaction("remove_tail3", 1, false); | |
1768 ASSERT_TRUE(success_) << "remove_tail3"; | |
1769 | |
1770 // Removing with one hundred entries on the cache, tiny index. | |
1771 BackendTransaction("remove_load1", 100, true); | |
1772 ASSERT_TRUE(success_) << "remove_load1"; | |
1773 BackendTransaction("remove_load2", 100, true); | |
1774 ASSERT_TRUE(success_) << "remove_load2"; | |
1775 BackendTransaction("remove_load3", 100, true); | |
1776 ASSERT_TRUE(success_) << "remove_load3"; | |
1777 | |
1778 // This case cannot be reverted. | |
1779 BackendTransaction("remove_one4", 0, false); | |
1780 ASSERT_TRUE(success_) << "remove_one4"; | |
1781 BackendTransaction("remove_head4", 1, false); | |
1782 ASSERT_TRUE(success_) << "remove_head4"; | |
1783 } | |
1784 | |
1785 #if defined(OS_WIN) | |
1786 // http://crbug.com/396392 | |
1787 #define MAYBE_RecoverRemove DISABLED_RecoverRemove | |
1788 #else | |
1789 #define MAYBE_RecoverRemove RecoverRemove | |
1790 #endif | |
1791 TEST_F(DiskCacheBackendTest, MAYBE_RecoverRemove) { | |
1792 BackendRecoverRemove(); | |
1793 } | |
1794 | |
1795 #if defined(OS_WIN) | |
1796 // http://crbug.com/396392 | |
1797 #define MAYBE_NewEvictionRecoverRemove DISABLED_NewEvictionRecoverRemove | |
1798 #else | |
1799 #define MAYBE_NewEvictionRecoverRemove NewEvictionRecoverRemove | |
1800 #endif | |
1801 TEST_F(DiskCacheBackendTest, MAYBE_NewEvictionRecoverRemove) { | |
1802 SetNewEviction(); | |
1803 BackendRecoverRemove(); | |
1804 } | |
1805 | |
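// Recovers a dirty cache under a tiny size limit, so eviction runs during
// initialization; the only requirement is that we don't crash.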
1806 void DiskCacheBackendTest::BackendRecoverWithEviction() { | |
1807 success_ = false; | |
1808 ASSERT_TRUE(CopyTestCache("insert_load1")); | |
1809 DisableFirstCleanup(); | |
1810 | |
1811 SetMask(0xf); | |
1812 SetMaxSize(0x1000); | |
1813 | |
1814 // We should not crash here. | |
1815 InitCache(); | |
1816 DisableIntegrityCheck(); | |
1817 } | |
1818 | |
1819 TEST_F(DiskCacheBackendTest, RecoverWithEviction) { | |
1820 BackendRecoverWithEviction(); | |
1821 } | |
1822 | |
1823 TEST_F(DiskCacheBackendTest, NewEvictionRecoverWithEviction) { | |
1824 SetNewEviction(); | |
1825 BackendRecoverWithEviction(); | |
1826 } | |
1827 | |
1828 // Tests that the |BackendImpl| fails to start with the wrong cache version. | |
1829 TEST_F(DiskCacheTest, WrongVersion) { | |
1830 ASSERT_TRUE(CopyTestCache("wrong_version")); | |
1831 base::Thread cache_thread("CacheThread"); | |
1832 ASSERT_TRUE(cache_thread.StartWithOptions( | |
1833 base::Thread::Options(base::MessageLoop::TYPE_IO, 0))); | |
1834 net::TestCompletionCallback cb; | |
1835 | |
1836 scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl( | |
1837 cache_path_, cache_thread.task_runner(), NULL)); | |
1838 int rv = cache->Init(cb.callback()); | |
1839 ASSERT_EQ(net::ERR_FAILED, cb.GetResult(rv)); | |
1840 } | |
1841 | |
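// An entropy provider that always returns 0.5, used so the tests below can
// build a FieldTrialList and assign the SimpleCacheTrial group
// deterministically.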
1842 class BadEntropyProvider : public base::FieldTrial::EntropyProvider { | |
1843 public: | |
1844 ~BadEntropyProvider() override {} | |
1845 | |
1846 double GetEntropyForTrial(const std::string& trial_name, | |
1847 uint32 randomization_seed) const override { | |
1848 return 0.5; | |
1849 } | |
1850 }; | |
1851 | |
1852 // Tests that the disk cache successfully joins the control group, dropping the | |
1853 // existing cache in favour of a new empty cache. | |
1854 // Disabled on Android since this test requires the cache creator to create | |
1855 // blockfile caches. | |
1856 #if !defined(OS_ANDROID) | |
1857 TEST_F(DiskCacheTest, SimpleCacheControlJoin) { | |
1858 base::Thread cache_thread("CacheThread"); | |
1859 ASSERT_TRUE(cache_thread.StartWithOptions( | |
1860 base::Thread::Options(base::MessageLoop::TYPE_IO, 0))); | |
1861 | |
1862 scoped_ptr<disk_cache::BackendImpl> cache = | |
1863 CreateExistingEntryCache(cache_thread, cache_path_); | |
1864 ASSERT_TRUE(cache.get()); | |
1865 cache.reset(); | |
1866 | |
1867 // Instantiate the SimpleCacheTrial, forcing this run into the | |
1868 // ExperimentControl group. | |
1869 base::FieldTrialList field_trial_list(new BadEntropyProvider()); | |
1870 base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial", | |
1871 "ExperimentControl"); | |
1872 net::TestCompletionCallback cb; | |
1873 scoped_ptr<disk_cache::Backend> base_cache; | |
1874 int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE, | |
1875 net::CACHE_BACKEND_BLOCKFILE, | |
1876 cache_path_, | |
1877 0, | |
1878 true, | |
1879 cache_thread.task_runner(), | |
1880 NULL, | |
1881 &base_cache, | |
1882 cb.callback()); | |
1883 ASSERT_EQ(net::OK, cb.GetResult(rv)); | |
1884 EXPECT_EQ(0, base_cache->GetEntryCount()); | |
1885 } | |
1886 #endif | |
1887 | |
1888 // Tests that the disk cache can restart in the control group preserving | |
1889 // existing entries. | |
1890 TEST_F(DiskCacheTest, SimpleCacheControlRestart) { | |
1891 // Instantiate the SimpleCacheTrial, forcing this run into the | |
1892 // ExperimentControl group. | |
1893 base::FieldTrialList field_trial_list(new BadEntropyProvider()); | |
1894 base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial", | |
1895 "ExperimentControl"); | |
1896 | |
1897 base::Thread cache_thread("CacheThread"); | |
1898 ASSERT_TRUE(cache_thread.StartWithOptions( | |
1899 base::Thread::Options(base::MessageLoop::TYPE_IO, 0))); | |
1900 | |
1901 scoped_ptr<disk_cache::BackendImpl> cache = | |
1902 CreateExistingEntryCache(cache_thread, cache_path_); | |
1903 ASSERT_TRUE(cache.get()); | |
1904 | |
1905 net::TestCompletionCallback cb; | |
1906 | |
1907 const int kRestartCount = 5; | |
1908 for (int i = 0; i < kRestartCount; ++i) { | |
1909 cache.reset(new disk_cache::BackendImpl( | |
1910 cache_path_, cache_thread.message_loop_proxy(), NULL)); | |
1911 int rv = cache->Init(cb.callback()); | |
1912 ASSERT_EQ(net::OK, cb.GetResult(rv)); | |
1913 EXPECT_EQ(1, cache->GetEntryCount()); | |
1914 | |
1915 disk_cache::Entry* entry = NULL; | |
1916 rv = cache->OpenEntry(kExistingEntryKey, &entry, cb.callback()); | |
1917 EXPECT_EQ(net::OK, cb.GetResult(rv)); | |
1918 EXPECT_TRUE(entry); | |
1919 entry->Close(); | |
1920 } | |
1921 } | |
1922 | |
1923 // Tests that the disk cache can leave the control group preserving existing | |
1924 // entries. | |
1925 TEST_F(DiskCacheTest, SimpleCacheControlLeave) { | |
1926 base::Thread cache_thread("CacheThread"); | |
1927 ASSERT_TRUE(cache_thread.StartWithOptions( | |
1928 base::Thread::Options(base::MessageLoop::TYPE_IO, 0))); | |
1929 | |
1930 { | |
1931 // Instantiate the SimpleCacheTrial, forcing this run into the | |
1932 // ExperimentControl group. | |
1933 base::FieldTrialList field_trial_list(new BadEntropyProvider()); | |
1934 base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial", | |
1935 "ExperimentControl"); | |
1936 | |
1937 scoped_ptr<disk_cache::BackendImpl> cache = | |
1938 CreateExistingEntryCache(cache_thread, cache_path_); | |
1939 ASSERT_TRUE(cache.get()); | |
1940 } | |
1941 | |
1942 // Instantiate the SimpleCacheTrial, forcing this run into the | |
1943 // ExperimentNo group. | |
1944 base::FieldTrialList field_trial_list(new BadEntropyProvider()); | |
1945 base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial", "ExperimentNo"); | |
1946 net::TestCompletionCallback cb; | |
1947 | |
1948 const int kRestartCount = 5; | |
1949 for (int i = 0; i < kRestartCount; ++i) { | |
1950 scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl( | |
1951 cache_path_, cache_thread.message_loop_proxy(), NULL)); | |
1952 int rv = cache->Init(cb.callback()); | |
1953 ASSERT_EQ(net::OK, cb.GetResult(rv)); | |
1954 EXPECT_EQ(1, cache->GetEntryCount()); | |
1955 | |
1956 disk_cache::Entry* entry = NULL; | |
1957 rv = cache->OpenEntry(kExistingEntryKey, &entry, cb.callback()); | |
1958 EXPECT_EQ(net::OK, cb.GetResult(rv)); | |
1959 EXPECT_TRUE(entry); | |
1960 entry->Close(); | |
1961 } | |
1962 } | |
1963 | |
1964 // Tests that the cache is properly restarted on recovery error. | |
1965 // Disabled on Android since this test requires the cache creator to create | |
1966 // blockfile caches. | |
1967 #if !defined(OS_ANDROID) | |
1968 TEST_F(DiskCacheBackendTest, DeleteOld) { | |
1969 ASSERT_TRUE(CopyTestCache("wrong_version")); | |
1970 SetNewEviction(); | |
1971 base::Thread cache_thread("CacheThread"); | |
1972 ASSERT_TRUE(cache_thread.StartWithOptions( | |
1973 base::Thread::Options(base::MessageLoop::TYPE_IO, 0))); | |
1974 | |
1975 net::TestCompletionCallback cb; | |
1976 bool prev = base::ThreadRestrictions::SetIOAllowed(false); | |
1977 base::FilePath path(cache_path_); | |
1978 int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE, | |
1979 net::CACHE_BACKEND_BLOCKFILE, | |
1980 path, | |
1981 0, | |
1982 true, | |
1983 cache_thread.task_runner(), | |
1984 NULL, | |
1985 &cache_, | |
1986 cb.callback()); | |
1987 path.clear(); // Make sure path was captured by the previous call. | |
1988 ASSERT_EQ(net::OK, cb.GetResult(rv)); | |
1989 base::ThreadRestrictions::SetIOAllowed(prev); | |
1990 cache_.reset(); | |
1991 EXPECT_TRUE(CheckCacheIntegrity(cache_path_, new_eviction_, mask_)); | |
1992 } | |
1993 #endif | |
1994 | |
1995 // We want to be able to deal with messed up entries on disk. | |
1996 void DiskCacheBackendTest::BackendInvalidEntry2() { | |
1997 ASSERT_TRUE(CopyTestCache("bad_entry")); | |
1998 DisableFirstCleanup(); | |
1999 InitCache(); | |
2000 | |
2001 disk_cache::Entry *entry1, *entry2; | |
2002 ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1)); | |
2003 EXPECT_NE(net::OK, OpenEntry("some other key", &entry2)); | |
2004 entry1->Close(); | |
2005 | |
2006 // CheckCacheIntegrity will fail at this point. | |
2007 DisableIntegrityCheck(); | |
2008 } | |
2009 | |
2010 TEST_F(DiskCacheBackendTest, InvalidEntry2) { | |
2011 BackendInvalidEntry2(); | |
2012 } | |
2013 | |
2014 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry2) { | |
2015 SetNewEviction(); | |
2016 BackendInvalidEntry2(); | |
2017 } | |
2018 | |
2019 // Tests that we don't crash or hang when enumerating this cache. | |
2020 void DiskCacheBackendTest::BackendInvalidEntry3() { | |
2021 SetMask(0x1); // 2-entry table. | |
2022 SetMaxSize(0x3000); // 12 kB. | |
2023 DisableFirstCleanup(); | |
2024 InitCache(); | |
2025 | |
2026 disk_cache::Entry* entry; | |
2027 scoped_ptr<TestIterator> iter = CreateIterator(); | |
2028 while (iter->OpenNextEntry(&entry) == net::OK) { | |
2029 entry->Close(); | |
2030 } | |
2031 } | |
2032 | |
2033 TEST_F(DiskCacheBackendTest, InvalidEntry3) { | |
2034 ASSERT_TRUE(CopyTestCache("dirty_entry3")); | |
2035 BackendInvalidEntry3(); | |
2036 } | |
2037 | |
2038 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry3) { | |
2039 ASSERT_TRUE(CopyTestCache("dirty_entry4")); | |
2040 SetNewEviction(); | |
2041 BackendInvalidEntry3(); | |
2042 DisableIntegrityCheck(); | |
2043 } | |
2044 | |
2045 // Test that we handle a dirty entry on the LRU list, already replaced with | |
2046 // the same key, and with hash collisions. | |
2047 TEST_F(DiskCacheBackendTest, InvalidEntry4) { | |
2048 ASSERT_TRUE(CopyTestCache("dirty_entry3")); | |
2049 SetMask(0x1); // 2-entry table. | |
2050 SetMaxSize(0x3000); // 12 kB. | |
2051 DisableFirstCleanup(); | |
2052 InitCache(); | |
2053 | |
2054 TrimForTest(false); | |
2055 } | |
2056 | |
2057 // Test that we handle a dirty entry on the deleted list, already replaced with | |
2058 // the same key, and with hash collisions. | |
2059 TEST_F(DiskCacheBackendTest, InvalidEntry5) { | |
2060 ASSERT_TRUE(CopyTestCache("dirty_entry4")); | |
2061 SetNewEviction(); | |
2062 SetMask(0x1); // 2-entry table. | |
2063 SetMaxSize(0x3000); // 12 kB. | |
2064 DisableFirstCleanup(); | |
2065 InitCache(); | |
2066 | |
2067 TrimDeletedListForTest(false); | |
2068 } | |
2069 | |
2070 TEST_F(DiskCacheBackendTest, InvalidEntry6) { | |
2071 ASSERT_TRUE(CopyTestCache("dirty_entry5")); | |
2072 SetMask(0x1); // 2-entry table. | |
2073 SetMaxSize(0x3000); // 12 kB. | |
2074 DisableFirstCleanup(); | |
2075 InitCache(); | |
2076 | |
2077 // There is a dirty entry (but marked as clean) at the end, pointing to a | |
2078 // deleted entry through the hash collision list. We should not re-insert the | |
2079 // deleted entry into the index table. | |
2080 | |
2081 TrimForTest(false); | |
2082 // The cache should be clean (as detected by CheckCacheIntegrity). | |
2083 } | |
2084 | |
2085 // Tests that we don't hang when there is a loop on the hash collision list. | |
2086 // The test cache could be a result of bug 69135. | |
2087 TEST_F(DiskCacheBackendTest, BadNextEntry1) { | |
2088 ASSERT_TRUE(CopyTestCache("list_loop2")); | |
2089 SetMask(0x1); // 2-entry table. | |
2090 SetMaxSize(0x3000); // 12 kB. | |
2091 DisableFirstCleanup(); | |
2092 InitCache(); | |
2093 | |
2094   // The second entry points at itself, and the first entry is not accessible | |
2095   // through the index, but it is at the head of the LRU. | |
2096 | |
2097 disk_cache::Entry* entry; | |
2098 ASSERT_EQ(net::OK, CreateEntry("The first key", &entry)); | |
2099 entry->Close(); | |
2100 | |
2101 TrimForTest(false); | |
2102 TrimForTest(false); | |
2103 ASSERT_EQ(net::OK, OpenEntry("The first key", &entry)); | |
2104 entry->Close(); | |
2105 EXPECT_EQ(1, cache_->GetEntryCount()); | |
2106 } | |
2107 | |
2108 // Tests that we don't hang when there is a loop on the hash collision list. | |
2109 // The test cache could be a result of bug 69135. | |
2110 TEST_F(DiskCacheBackendTest, BadNextEntry2) { | |
2111 ASSERT_TRUE(CopyTestCache("list_loop3")); | |
2112 SetMask(0x1); // 2-entry table. | |
2113 SetMaxSize(0x3000); // 12 kB. | |
2114 DisableFirstCleanup(); | |
2115 InitCache(); | |
2116 | |
2117 // There is a wide loop of 5 entries. | |
2118 | |
2119 disk_cache::Entry* entry; | |
2120 ASSERT_NE(net::OK, OpenEntry("Not present key", &entry)); | |
2121 } | |
2122 | |
2123 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry6) { | |
2124 ASSERT_TRUE(CopyTestCache("bad_rankings3")); | |
2125 DisableFirstCleanup(); | |
2126 SetNewEviction(); | |
2127 InitCache(); | |
2128 | |
2129 // The second entry is dirty, but removing it should not corrupt the list. | |
2130 disk_cache::Entry* entry; | |
2131 ASSERT_NE(net::OK, OpenEntry("the second key", &entry)); | |
2132 ASSERT_EQ(net::OK, OpenEntry("the first key", &entry)); | |
2133 | |
2134 // This should not delete the cache. | |
2135 entry->Doom(); | |
2136 FlushQueueForTest(); | |
2137 entry->Close(); | |
2138 | |
2139 ASSERT_EQ(net::OK, OpenEntry("some other key", &entry)); | |
2140 entry->Close(); | |
2141 } | |
2142 | |
2143 // Tests handling of corrupt entries by keeping the rankings node around, with | |
2144 // a fatal failure. | |
2145 void DiskCacheBackendTest::BackendInvalidEntry7() { | |
2146 const int kSize = 0x3000; // 12 kB. | |
2147 SetMaxSize(kSize * 10); | |
2148 InitCache(); | |
2149 | |
2150 std::string first("some key"); | |
2151 std::string second("something else"); | |
2152 disk_cache::Entry* entry; | |
2153 ASSERT_EQ(net::OK, CreateEntry(first, &entry)); | |
2154 entry->Close(); | |
2155 ASSERT_EQ(net::OK, CreateEntry(second, &entry)); | |
2156 | |
2157 // Corrupt this entry. | |
2158 disk_cache::EntryImpl* entry_impl = | |
2159 static_cast<disk_cache::EntryImpl*>(entry); | |
2160 | |
2161 entry_impl->rankings()->Data()->next = 0; | |
2162 entry_impl->rankings()->Store(); | |
2163 entry->Close(); | |
2164 FlushQueueForTest(); | |
2165 EXPECT_EQ(2, cache_->GetEntryCount()); | |
2166 | |
2167 // This should detect the bad entry. | |
2168 EXPECT_NE(net::OK, OpenEntry(second, &entry)); | |
2169 EXPECT_EQ(1, cache_->GetEntryCount()); | |
2170 | |
2171 // We should delete the cache. The list still has a corrupt node. | |
2172 scoped_ptr<TestIterator> iter = CreateIterator(); | |
2173 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry)); | |
2174 FlushQueueForTest(); | |
2175 EXPECT_EQ(0, cache_->GetEntryCount()); | |
2176 } | |
2177 | |
2178 TEST_F(DiskCacheBackendTest, InvalidEntry7) { | |
2179 BackendInvalidEntry7(); | |
2180 } | |
2181 | |
2182 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry7) { | |
2183 SetNewEviction(); | |
2184 BackendInvalidEntry7(); | |
2185 } | |
2186 | |
2187 // Tests handling of corrupt entries by keeping the rankings node around, with | |
2188 // a non-fatal failure. | |
2189 void DiskCacheBackendTest::BackendInvalidEntry8() { | |
2190 const int kSize = 0x3000; // 12 kB | |
2191 SetMaxSize(kSize * 10); | |
2192 InitCache(); | |
2193 | |
2194 std::string first("some key"); | |
2195 std::string second("something else"); | |
2196 disk_cache::Entry* entry; | |
2197 ASSERT_EQ(net::OK, CreateEntry(first, &entry)); | |
2198 entry->Close(); | |
2199 ASSERT_EQ(net::OK, CreateEntry(second, &entry)); | |
2200 | |
2201 // Corrupt this entry. | |
2202 disk_cache::EntryImpl* entry_impl = | |
2203 static_cast<disk_cache::EntryImpl*>(entry); | |
2204 | |
2205 entry_impl->rankings()->Data()->contents = 0; | |
2206 entry_impl->rankings()->Store(); | |
2207 entry->Close(); | |
2208 FlushQueueForTest(); | |
2209 EXPECT_EQ(2, cache_->GetEntryCount()); | |
2210 | |
2211 // This should detect the bad entry. | |
2212 EXPECT_NE(net::OK, OpenEntry(second, &entry)); | |
2213 EXPECT_EQ(1, cache_->GetEntryCount()); | |
2214 | |
2215 // We should not delete the cache. | |
2216 scoped_ptr<TestIterator> iter = CreateIterator(); | |
2217 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry)); | |
2218 entry->Close(); | |
2219 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry)); | |
2220 EXPECT_EQ(1, cache_->GetEntryCount()); | |
2221 } | |
2222 | |
2223 TEST_F(DiskCacheBackendTest, InvalidEntry8) { | |
2224 BackendInvalidEntry8(); | |
2225 } | |
2226 | |
2227 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry8) { | |
2228 SetNewEviction(); | |
2229 BackendInvalidEntry8(); | |
2230 } | |
2231 | |
2232 // Tests handling of corrupt entries detected by enumerations. Note that these | |
2233 // tests (xx9 to xx11) are basically just going through slightly different | |
2234 // codepaths so they are tightly coupled with the code, but that is better than | |
2235 // not testing error handling code. | |
2236 void DiskCacheBackendTest::BackendInvalidEntry9(bool eviction) { | |
2237 const int kSize = 0x3000; // 12 kB. | |
2238 SetMaxSize(kSize * 10); | |
2239 InitCache(); | |
2240 | |
2241 std::string first("some key"); | |
2242 std::string second("something else"); | |
2243 disk_cache::Entry* entry; | |
2244 ASSERT_EQ(net::OK, CreateEntry(first, &entry)); | |
2245 entry->Close(); | |
2246 ASSERT_EQ(net::OK, CreateEntry(second, &entry)); | |
2247 | |
2248 // Corrupt this entry. | |
2249 disk_cache::EntryImpl* entry_impl = | |
2250 static_cast<disk_cache::EntryImpl*>(entry); | |
2251 | |
2252 entry_impl->entry()->Data()->state = 0xbad; | |
2253 entry_impl->entry()->Store(); | |
2254 entry->Close(); | |
2255 FlushQueueForTest(); | |
2256 EXPECT_EQ(2, cache_->GetEntryCount()); | |
2257 | |
2258 if (eviction) { | |
2259 TrimForTest(false); | |
2260 EXPECT_EQ(1, cache_->GetEntryCount()); | |
2261 TrimForTest(false); | |
2262 EXPECT_EQ(1, cache_->GetEntryCount()); | |
2263 } else { | |
2264 // We should detect the problem through the list, but we should not delete | |
2265 // the entry, just fail the iteration. | |
2266 scoped_ptr<TestIterator> iter = CreateIterator(); | |
2267 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry)); | |
2268 | |
2269 // Now a full iteration will work, and return one entry. | |
2270 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry)); | |
2271 entry->Close(); | |
2272 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry)); | |
2273 | |
2274 // This should detect what's left of the bad entry. | |
2275 EXPECT_NE(net::OK, OpenEntry(second, &entry)); | |
2276 EXPECT_EQ(2, cache_->GetEntryCount()); | |
2277 } | |
2278 DisableIntegrityCheck(); | |
2279 } | |
2280 | |
2281 TEST_F(DiskCacheBackendTest, InvalidEntry9) { | |
2282 BackendInvalidEntry9(false); | |
2283 } | |
2284 | |
2285 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry9) { | |
2286 SetNewEviction(); | |
2287 BackendInvalidEntry9(false); | |
2288 } | |
2289 | |
2290 TEST_F(DiskCacheBackendTest, TrimInvalidEntry9) { | |
2291 BackendInvalidEntry9(true); | |
2292 } | |
2293 | |
2294 TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry9) { | |
2295 SetNewEviction(); | |
2296 BackendInvalidEntry9(true); | |
2297 } | |
2298 | |
2299 // Tests handling of corrupt entries detected by enumerations. | |
2300 void DiskCacheBackendTest::BackendInvalidEntry10(bool eviction) { | |
2301 const int kSize = 0x3000; // 12 kB. | |
2302 SetMaxSize(kSize * 10); | |
2303 SetNewEviction(); | |
2304 InitCache(); | |
2305 | |
2306 std::string first("some key"); | |
2307 std::string second("something else"); | |
2308 disk_cache::Entry* entry; | |
2309 ASSERT_EQ(net::OK, CreateEntry(first, &entry)); | |
2310 entry->Close(); | |
2311 ASSERT_EQ(net::OK, OpenEntry(first, &entry)); | |
2312 EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false)); | |
2313 entry->Close(); | |
2314 ASSERT_EQ(net::OK, CreateEntry(second, &entry)); | |
2315 | |
2316 // Corrupt this entry. | |
2317 disk_cache::EntryImpl* entry_impl = | |
2318 static_cast<disk_cache::EntryImpl*>(entry); | |
2319 | |
2320 entry_impl->entry()->Data()->state = 0xbad; | |
2321 entry_impl->entry()->Store(); | |
2322 entry->Close(); | |
2323 ASSERT_EQ(net::OK, CreateEntry("third", &entry)); | |
2324 entry->Close(); | |
2325 EXPECT_EQ(3, cache_->GetEntryCount()); | |
2326 | |
2327 // We have: | |
2328 // List 0: third -> second (bad). | |
2329 // List 1: first. | |
2330 | |
2331 if (eviction) { | |
2332 // Detection order: second -> first -> third. | |
2333 TrimForTest(false); | |
2334 EXPECT_EQ(3, cache_->GetEntryCount()); | |
2335 TrimForTest(false); | |
2336 EXPECT_EQ(2, cache_->GetEntryCount()); | |
2337 TrimForTest(false); | |
2338 EXPECT_EQ(1, cache_->GetEntryCount()); | |
2339 } else { | |
2340 // Detection order: third -> second -> first. | |
2341 // We should detect the problem through the list, but we should not delete | |
2342 // the entry. | |
2343 scoped_ptr<TestIterator> iter = CreateIterator(); | |
2344 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry)); | |
2345 entry->Close(); | |
2346 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry)); | |
2347 EXPECT_EQ(first, entry->GetKey()); | |
2348 entry->Close(); | |
2349 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry)); | |
2350 } | |
2351 DisableIntegrityCheck(); | |
2352 } | |
2353 | |
2354 TEST_F(DiskCacheBackendTest, InvalidEntry10) { | |
2355 BackendInvalidEntry10(false); | |
2356 } | |
2357 | |
2358 TEST_F(DiskCacheBackendTest, TrimInvalidEntry10) { | |
2359 BackendInvalidEntry10(true); | |
2360 } | |
2361 | |
2362 // Tests handling of corrupt entries detected by enumerations. | |
2363 void DiskCacheBackendTest::BackendInvalidEntry11(bool eviction) { | |
2364 const int kSize = 0x3000; // 12 kB. | |
2365 SetMaxSize(kSize * 10); | |
2366 SetNewEviction(); | |
2367 InitCache(); | |
2368 | |
2369 std::string first("some key"); | |
2370 std::string second("something else"); | |
2371 disk_cache::Entry* entry; | |
2372 ASSERT_EQ(net::OK, CreateEntry(first, &entry)); | |
2373 entry->Close(); | |
2374 ASSERT_EQ(net::OK, OpenEntry(first, &entry)); | |
2375 EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false)); | |
2376 entry->Close(); | |
2377 ASSERT_EQ(net::OK, CreateEntry(second, &entry)); | |
2378 entry->Close(); | |
2379 ASSERT_EQ(net::OK, OpenEntry(second, &entry)); | |
2380 EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false)); | |
2381 | |
2382 // Corrupt this entry. | |
2383 disk_cache::EntryImpl* entry_impl = | |
2384 static_cast<disk_cache::EntryImpl*>(entry); | |
2385 | |
2386 entry_impl->entry()->Data()->state = 0xbad; | |
2387 entry_impl->entry()->Store(); | |
2388 entry->Close(); | |
2389 ASSERT_EQ(net::OK, CreateEntry("third", &entry)); | |
2390 entry->Close(); | |
2391 FlushQueueForTest(); | |
2392 EXPECT_EQ(3, cache_->GetEntryCount()); | |
2393 | |
2394 // We have: | |
2395 // List 0: third. | |
2396 // List 1: second (bad) -> first. | |
2397 | |
2398 if (eviction) { | |
2399 // Detection order: third -> first -> second. | |
2400 TrimForTest(false); | |
2401 EXPECT_EQ(2, cache_->GetEntryCount()); | |
2402 TrimForTest(false); | |
2403 EXPECT_EQ(1, cache_->GetEntryCount()); | |
2404 TrimForTest(false); | |
2405 EXPECT_EQ(1, cache_->GetEntryCount()); | |
2406 } else { | |
2407 // Detection order: third -> second. | |
2408 // We should detect the problem through the list, but we should not delete | |
2409 // the entry, just fail the iteration. | |
2410 scoped_ptr<TestIterator> iter = CreateIterator(); | |
2411 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry)); | |
2412 entry->Close(); | |
2413 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry)); | |
2414 | |
2415 // Now a full iteration will work, and return two entries. | |
2416 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry)); | |
2417 entry->Close(); | |
2418 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry)); | |
2419 entry->Close(); | |
2420 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry)); | |
2421 } | |
2422 DisableIntegrityCheck(); | |
2423 } | |
2424 | |
2425 TEST_F(DiskCacheBackendTest, InvalidEntry11) { | |
2426 BackendInvalidEntry11(false); | |
2427 } | |
2428 | |
2429 TEST_F(DiskCacheBackendTest, TrimInvalidEntry11) { | |
2430 BackendInvalidEntry11(true); | |
2431 } | |
2432 | |
2433 // Tests handling of corrupt entries in the middle of a long eviction run. | |
2434 void DiskCacheBackendTest::BackendTrimInvalidEntry12() { | |
2435 const int kSize = 0x3000; // 12 kB | |
2436 SetMaxSize(kSize * 10); | |
2437 InitCache(); | |
2438 | |
2439 std::string first("some key"); | |
2440 std::string second("something else"); | |
2441 disk_cache::Entry* entry; | |
2442 ASSERT_EQ(net::OK, CreateEntry(first, &entry)); | |
2443 entry->Close(); | |
2444 ASSERT_EQ(net::OK, CreateEntry(second, &entry)); | |
2445 | |
2446 // Corrupt this entry. | |
2447 disk_cache::EntryImpl* entry_impl = | |
2448 static_cast<disk_cache::EntryImpl*>(entry); | |
2449 | |
2450 entry_impl->entry()->Data()->state = 0xbad; | |
2451 entry_impl->entry()->Store(); | |
2452 entry->Close(); | |
2453 ASSERT_EQ(net::OK, CreateEntry("third", &entry)); | |
2454 entry->Close(); | |
2455 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry)); | |
2456 TrimForTest(true); | |
2457 EXPECT_EQ(1, cache_->GetEntryCount()); | |
2458 entry->Close(); | |
2459 DisableIntegrityCheck(); | |
2460 } | |
2461 | |
2462 TEST_F(DiskCacheBackendTest, TrimInvalidEntry12) { | |
2463 BackendTrimInvalidEntry12(); | |
2464 } | |
2465 | |
2466 TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry12) { | |
2467 SetNewEviction(); | |
2468 BackendTrimInvalidEntry12(); | |
2469 } | |
2470 | |
2471 // We want to be able to deal with messed up entries on disk. | |
2472 void DiskCacheBackendTest::BackendInvalidRankings2() { | |
2473 ASSERT_TRUE(CopyTestCache("bad_rankings")); | |
2474 DisableFirstCleanup(); | |
2475 InitCache(); | |
2476 | |
2477 disk_cache::Entry *entry1, *entry2; | |
2478 EXPECT_NE(net::OK, OpenEntry("the first key", &entry1)); | |
2479 ASSERT_EQ(net::OK, OpenEntry("some other key", &entry2)); | |
2480 entry2->Close(); | |
2481 | |
2482 // CheckCacheIntegrity will fail at this point. | |
2483 DisableIntegrityCheck(); | |
2484 } | |
2485 | |
2486 TEST_F(DiskCacheBackendTest, InvalidRankings2) { | |
2487 BackendInvalidRankings2(); | |
2488 } | |
2489 | |
2490 TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankings2) { | |
2491 SetNewEviction(); | |
2492 BackendInvalidRankings2(); | |
2493 } | |
2494 | |
2495 // If the LRU is corrupt, we delete the cache. | |
2496 void DiskCacheBackendTest::BackendInvalidRankings() { | |
2497 disk_cache::Entry* entry; | |
2498 scoped_ptr<TestIterator> iter = CreateIterator(); | |
2499 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry)); | |
2500 entry->Close(); | |
2501 EXPECT_EQ(2, cache_->GetEntryCount()); | |
2502 | |
2503 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry)); | |
2504 FlushQueueForTest(); // Allow the restart to finish. | |
2505 EXPECT_EQ(0, cache_->GetEntryCount()); | |
2506 } | |
2507 | |
2508 TEST_F(DiskCacheBackendTest, InvalidRankingsSuccess) { | |
2509 ASSERT_TRUE(CopyTestCache("bad_rankings")); | |
2510 DisableFirstCleanup(); | |
2511 InitCache(); | |
2512 BackendInvalidRankings(); | |
2513 } | |
2514 | |
2515 TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankingsSuccess) { | |
2516 ASSERT_TRUE(CopyTestCache("bad_rankings")); | |
2517 DisableFirstCleanup(); | |
2518 SetNewEviction(); | |
2519 InitCache(); | |
2520 BackendInvalidRankings(); | |
2521 } | |
2522 | |
2523 TEST_F(DiskCacheBackendTest, InvalidRankingsFailure) { | |
2524 ASSERT_TRUE(CopyTestCache("bad_rankings")); | |
2525 DisableFirstCleanup(); | |
2526 InitCache(); | |
2527 SetTestMode(); // Fail cache reinitialization. | |
2528 BackendInvalidRankings(); | |
2529 } | |
2530 | |
2531 TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankingsFailure) { | |
2532 ASSERT_TRUE(CopyTestCache("bad_rankings")); | |
2533 DisableFirstCleanup(); | |
2534 SetNewEviction(); | |
2535 InitCache(); | |
2536 SetTestMode(); // Fail cache reinitialization. | |
2537 BackendInvalidRankings(); | |
2538 } | |
2539 | |
2540 // If the LRU is corrupt and we have open entries, we disable the cache. | |
2541 void DiskCacheBackendTest::BackendDisable() { | |
2542 disk_cache::Entry *entry1, *entry2; | |
2543 scoped_ptr<TestIterator> iter = CreateIterator(); | |
2544 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry1)); | |
2545 | |
2546 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry2)); | |
2547 EXPECT_EQ(0, cache_->GetEntryCount()); | |
2548 EXPECT_NE(net::OK, CreateEntry("Something new", &entry2)); | |
2549 | |
2550 entry1->Close(); | |
2551 FlushQueueForTest(); // Flushing the Close posts a task to restart the cache. | |
2552 FlushQueueForTest(); // This one actually allows that task to complete. | |
2553 | |
2554 EXPECT_EQ(0, cache_->GetEntryCount()); | |
2555 } | |
2556 | |
2557 TEST_F(DiskCacheBackendTest, DisableSuccess) { | |
2558 ASSERT_TRUE(CopyTestCache("bad_rankings")); | |
2559 DisableFirstCleanup(); | |
2560 InitCache(); | |
2561 BackendDisable(); | |
2562 } | |
2563 | |
2564 TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess) { | |
2565 ASSERT_TRUE(CopyTestCache("bad_rankings")); | |
2566 DisableFirstCleanup(); | |
2567 SetNewEviction(); | |
2568 InitCache(); | |
2569 BackendDisable(); | |
2570 } | |
2571 | |
2572 TEST_F(DiskCacheBackendTest, DisableFailure) { | |
2573 ASSERT_TRUE(CopyTestCache("bad_rankings")); | |
2574 DisableFirstCleanup(); | |
2575 InitCache(); | |
2576 SetTestMode(); // Fail cache reinitialization. | |
2577 BackendDisable(); | |
2578 } | |
2579 | |
2580 TEST_F(DiskCacheBackendTest, NewEvictionDisableFailure) { | |
2581 ASSERT_TRUE(CopyTestCache("bad_rankings")); | |
2582 DisableFirstCleanup(); | |
2583 SetNewEviction(); | |
2584 InitCache(); | |
2585 SetTestMode(); // Fail cache reinitialization. | |
2586 BackendDisable(); | |
2587 } | |
2588 | |
2589 // This is another type of corruption on the LRU; disable the cache. | |
2590 void DiskCacheBackendTest::BackendDisable2() { | |
2591 EXPECT_EQ(8, cache_->GetEntryCount()); | |
2592 | |
2593 disk_cache::Entry* entry; | |
2594 scoped_ptr<TestIterator> iter = CreateIterator(); | |
2595 int count = 0; | |
2596 while (iter->OpenNextEntry(&entry) == net::OK) { | |
2597 ASSERT_TRUE(NULL != entry); | |
2598 entry->Close(); | |
2599 count++; | |
2600 ASSERT_LT(count, 9); | |
2601   } | |
2602 | |
2603 FlushQueueForTest(); | |
2604 EXPECT_EQ(0, cache_->GetEntryCount()); | |
2605 } | |
2606 | |
2607 TEST_F(DiskCacheBackendTest, DisableSuccess2) { | |
2608 ASSERT_TRUE(CopyTestCache("list_loop")); | |
2609 DisableFirstCleanup(); | |
2610 InitCache(); | |
2611 BackendDisable2(); | |
2612 } | |
2613 | |
2614 TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess2) { | |
2615 ASSERT_TRUE(CopyTestCache("list_loop")); | |
2616 DisableFirstCleanup(); | |
2617 SetNewEviction(); | |
2618 InitCache(); | |
2619 BackendDisable2(); | |
2620 } | |
2621 | |
2622 TEST_F(DiskCacheBackendTest, DisableFailure2) { | |
2623 ASSERT_TRUE(CopyTestCache("list_loop")); | |
2624 DisableFirstCleanup(); | |
2625 InitCache(); | |
2626 SetTestMode(); // Fail cache reinitialization. | |
2627 BackendDisable2(); | |
2628 } | |
2629 | |
2630 TEST_F(DiskCacheBackendTest, NewEvictionDisableFailure2) { | |
2631 ASSERT_TRUE(CopyTestCache("list_loop")); | |
2632 DisableFirstCleanup(); | |
2633 SetNewEviction(); | |
2634 InitCache(); | |
2635 SetTestMode(); // Fail cache reinitialization. | |
2636 BackendDisable2(); | |
2637 } | |
2638 | |
2639 // If the index size changes when we disable the cache, we should not crash. | |
2640 void DiskCacheBackendTest::BackendDisable3() { | |
2641 disk_cache::Entry *entry1, *entry2; | |
2642 scoped_ptr<TestIterator> iter = CreateIterator(); | |
2643 EXPECT_EQ(2, cache_->GetEntryCount()); | |
2644 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry1)); | |
2645 entry1->Close(); | |
2646 | |
2647 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry2)); | |
2648 FlushQueueForTest(); | |
2649 | |
2650 ASSERT_EQ(net::OK, CreateEntry("Something new", &entry2)); | |
2651 entry2->Close(); | |
2652 | |
2653 EXPECT_EQ(1, cache_->GetEntryCount()); | |
2654 } | |
2655 | |
2656 TEST_F(DiskCacheBackendTest, DisableSuccess3) { | |
2657 ASSERT_TRUE(CopyTestCache("bad_rankings2")); | |
2658 DisableFirstCleanup(); | |
2659 SetMaxSize(20 * 1024 * 1024); | |
2660 InitCache(); | |
2661 BackendDisable3(); | |
2662 } | |
2663 | |
2664 TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess3) { | |
2665 ASSERT_TRUE(CopyTestCache("bad_rankings2")); | |
2666 DisableFirstCleanup(); | |
2667 SetMaxSize(20 * 1024 * 1024); | |
2668 SetNewEviction(); | |
2669 InitCache(); | |
2670 BackendDisable3(); | |
2671 } | |
2672 | |
2673 // If we disable the cache, already open entries should work as far as possible. | |
2674 void DiskCacheBackendTest::BackendDisable4() { | |
2675 disk_cache::Entry *entry1, *entry2, *entry3, *entry4; | |
2676 scoped_ptr<TestIterator> iter = CreateIterator(); | |
2677 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry1)); | |
2678 | |
2679 char key2[2000]; | |
2680 char key3[20000]; | |
2681 CacheTestFillBuffer(key2, sizeof(key2), true); | |
2682 CacheTestFillBuffer(key3, sizeof(key3), true); | |
2683 key2[sizeof(key2) - 1] = '\0'; | |
2684 key3[sizeof(key3) - 1] = '\0'; | |
2685 ASSERT_EQ(net::OK, CreateEntry(key2, &entry2)); | |
2686 ASSERT_EQ(net::OK, CreateEntry(key3, &entry3)); | |
2687 | |
2688 const int kBufSize = 20000; | |
2689 scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kBufSize)); | |
2690 memset(buf->data(), 0, kBufSize); | |
2691 EXPECT_EQ(100, WriteData(entry2, 0, 0, buf.get(), 100, false)); | |
2692 EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false)); | |
2693 | |
2694 // This line should disable the cache but not delete it. | |
2695 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry4)); | |
2696 EXPECT_EQ(0, cache_->GetEntryCount()); | |
2697 | |
2698 EXPECT_NE(net::OK, CreateEntry("cache is disabled", &entry4)); | |
2699 | |
2700 EXPECT_EQ(100, ReadData(entry2, 0, 0, buf.get(), 100)); | |
2701 EXPECT_EQ(100, WriteData(entry2, 0, 0, buf.get(), 100, false)); | |
2702 EXPECT_EQ(100, WriteData(entry2, 1, 0, buf.get(), 100, false)); | |
2703 | |
2704 EXPECT_EQ(kBufSize, ReadData(entry3, 0, 0, buf.get(), kBufSize)); | |
2705 EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false)); | |
2706 EXPECT_EQ(kBufSize, WriteData(entry3, 1, 0, buf.get(), kBufSize, false)); | |
2707 | |
2708 std::string key = entry2->GetKey(); | |
2709 EXPECT_EQ(sizeof(key2) - 1, key.size()); | |
2710 key = entry3->GetKey(); | |
2711 EXPECT_EQ(sizeof(key3) - 1, key.size()); | |
2712 | |
2713 entry1->Close(); | |
2714 entry2->Close(); | |
2715 entry3->Close(); | |
2716 FlushQueueForTest(); // Flushing the Close posts a task to restart the cache. | |
2717 FlushQueueForTest(); // This one actually allows that task to complete. | |
2718 | |
2719 EXPECT_EQ(0, cache_->GetEntryCount()); | |
2720 } | |
2721 | |
2722 TEST_F(DiskCacheBackendTest, DisableSuccess4) { | |
2723 ASSERT_TRUE(CopyTestCache("bad_rankings")); | |
2724 DisableFirstCleanup(); | |
2725 InitCache(); | |
2726 BackendDisable4(); | |
2727 } | |
2728 | |
2729 TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess4) { | |
2730 ASSERT_TRUE(CopyTestCache("bad_rankings")); | |
2731 DisableFirstCleanup(); | |
2732 SetNewEviction(); | |
2733 InitCache(); | |
2734 BackendDisable4(); | |
2735 } | |
2736 | |
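// Makes sure that the backend can start its usage-stats timer and let it fire
// while the message loop runs.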
2737 TEST_F(DiskCacheTest, Backend_UsageStatsTimer) { | |
2738 MessageLoopHelper helper; | |
2739 | |
2740 ASSERT_TRUE(CleanupCacheDir()); | |
2741 scoped_ptr<disk_cache::BackendImpl> cache; | |
2742 cache.reset(new disk_cache::BackendImpl( | |
2743 cache_path_, base::ThreadTaskRunnerHandle::Get(), NULL)); | |
2744 ASSERT_TRUE(NULL != cache.get()); | |
2745 cache->SetUnitTestMode(); | |
2746 ASSERT_EQ(net::OK, cache->SyncInit()); | |
2747 | |
2748 // Wait for a callback that never comes... about 2 secs :). The message loop | |
2749 // has to run to allow invocation of the usage timer. | |
2750 helper.WaitUntilCacheIoFinished(1); | |
2751 } | |
2752 | |
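// The stats timer should not be created if backend initialization fails.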
2753 TEST_F(DiskCacheBackendTest, TimerNotCreated) { | |
2754 ASSERT_TRUE(CopyTestCache("wrong_version")); | |
2755 | |
2756 scoped_ptr<disk_cache::BackendImpl> cache; | |
2757 cache.reset(new disk_cache::BackendImpl( | |
2758 cache_path_, base::ThreadTaskRunnerHandle::Get(), NULL)); | |
2759 ASSERT_TRUE(NULL != cache.get()); | |
2760 cache->SetUnitTestMode(); | |
2761 ASSERT_NE(net::OK, cache->SyncInit()); | |
2762 | |
2763 ASSERT_TRUE(NULL == cache->GetTimerForTest()); | |
2764 | |
2765 DisableIntegrityCheck(); | |
2766 } | |
2767 | |
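// Checks that usage statistics are recorded and survive a cache restart.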
2768 TEST_F(DiskCacheBackendTest, Backend_UsageStats) { | |
2769 InitCache(); | |
2770 disk_cache::Entry* entry; | |
2771 ASSERT_EQ(net::OK, CreateEntry("key", &entry)); | |
2772 entry->Close(); | |
2773 FlushQueueForTest(); | |
2774 | |
2775 disk_cache::StatsItems stats; | |
2776 cache_->GetStats(&stats); | |
2777 EXPECT_FALSE(stats.empty()); | |
2778 | |
2779 disk_cache::StatsItems::value_type hits("Create hit", "0x1"); | |
2780 EXPECT_EQ(1, std::count(stats.begin(), stats.end(), hits)); | |
2781 | |
2782 cache_.reset(); | |
2783 | |
2784 // Now open the cache and verify that the stats are still there. | |
2785 DisableFirstCleanup(); | |
2786 InitCache(); | |
2787 EXPECT_EQ(1, cache_->GetEntryCount()); | |
2788 | |
2789 stats.clear(); | |
2790 cache_->GetStats(&stats); | |
2791 EXPECT_FALSE(stats.empty()); | |
2792 | |
2793 EXPECT_EQ(1, std::count(stats.begin(), stats.end(), hits)); | |
2794 } | |
2795 | |
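// Verifies that DoomAllEntries() empties the cache both while entries are
// still open and after all references have been released.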
2796 void DiskCacheBackendTest::BackendDoomAll() { | |
2797 InitCache(); | |
2798 | |
2799 disk_cache::Entry *entry1, *entry2; | |
2800 ASSERT_EQ(net::OK, CreateEntry("first", &entry1)); | |
2801 ASSERT_EQ(net::OK, CreateEntry("second", &entry2)); | |
2802 entry1->Close(); | |
2803 entry2->Close(); | |
2804 | |
2805 ASSERT_EQ(net::OK, CreateEntry("third", &entry1)); | |
2806 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry2)); | |
2807 | |
2808 ASSERT_EQ(4, cache_->GetEntryCount()); | |
2809 EXPECT_EQ(net::OK, DoomAllEntries()); | |
2810 ASSERT_EQ(0, cache_->GetEntryCount()); | |
2811 | |
2812 // We should stop posting tasks at some point (if we post any). | |
2813 base::MessageLoop::current()->RunUntilIdle(); | |
2814 | |
2815 disk_cache::Entry *entry3, *entry4; | |
2816 EXPECT_NE(net::OK, OpenEntry("third", &entry3)); | |
2817 ASSERT_EQ(net::OK, CreateEntry("third", &entry3)); | |
2818 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry4)); | |
2819 | |
2820 EXPECT_EQ(net::OK, DoomAllEntries()); | |
2821 ASSERT_EQ(0, cache_->GetEntryCount()); | |
2822 | |
2823 entry1->Close(); | |
2824 entry2->Close(); | |
2825 entry3->Doom(); // The entry should be already doomed, but this must work. | |
2826 entry3->Close(); | |
2827 entry4->Close(); | |
2828 | |
2829 // Now try with all references released. | |
2830 ASSERT_EQ(net::OK, CreateEntry("third", &entry1)); | |
2831 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry2)); | |
2832 entry1->Close(); | |
2833 entry2->Close(); | |
2834 | |
2835 ASSERT_EQ(2, cache_->GetEntryCount()); | |
2836 EXPECT_EQ(net::OK, DoomAllEntries()); | |
2837 ASSERT_EQ(0, cache_->GetEntryCount()); | |
2838 | |
2839 EXPECT_EQ(net::OK, DoomAllEntries()); | |
2840 } | |
2841 | |
2842 TEST_F(DiskCacheBackendTest, DoomAll) { | |
2843 BackendDoomAll(); | |
2844 } | |
2845 | |
2846 TEST_F(DiskCacheBackendTest, NewEvictionDoomAll) { | |
2847 SetNewEviction(); | |
2848 BackendDoomAll(); | |
2849 } | |
2850 | |
2851 TEST_F(DiskCacheBackendTest, MemoryOnlyDoomAll) { | |
2852 SetMemoryOnlyMode(); | |
2853 BackendDoomAll(); | |
2854 } | |
2855 | |
2856 TEST_F(DiskCacheBackendTest, AppCacheOnlyDoomAll) { | |
2857 SetCacheType(net::APP_CACHE); | |
2858 BackendDoomAll(); | |
2859 } | |
2860 | |
2861 TEST_F(DiskCacheBackendTest, ShaderCacheOnlyDoomAll) { | |
2862 SetCacheType(net::SHADER_CACHE); | |
2863 BackendDoomAll(); | |
2864 } | |
2865 | |
2866 // If the index size changes when we doom the cache, we should not crash. | |
2867 void DiskCacheBackendTest::BackendDoomAll2() { | |
2868 EXPECT_EQ(2, cache_->GetEntryCount()); | |
2869 EXPECT_EQ(net::OK, DoomAllEntries()); | |
2870 | |
2871 disk_cache::Entry* entry; | |
2872 ASSERT_EQ(net::OK, CreateEntry("Something new", &entry)); | |
2873 entry->Close(); | |
2874 | |
2875 EXPECT_EQ(1, cache_->GetEntryCount()); | |
2876 } | |
2877 | |
2878 TEST_F(DiskCacheBackendTest, DoomAll2) { | |
2879 ASSERT_TRUE(CopyTestCache("bad_rankings2")); | |
2880 DisableFirstCleanup(); | |
2881 SetMaxSize(20 * 1024 * 1024); | |
2882 InitCache(); | |
2883 BackendDoomAll2(); | |
2884 } | |
2885 | |
2886 TEST_F(DiskCacheBackendTest, NewEvictionDoomAll2) { | |
2887 ASSERT_TRUE(CopyTestCache("bad_rankings2")); | |
2888 DisableFirstCleanup(); | |
2889 SetMaxSize(20 * 1024 * 1024); | |
2890 SetNewEviction(); | |
2891 InitCache(); | |
2892 BackendDoomAll2(); | |
2893 } | |
2894 | |
2895 // We should be able to create the same entry on multiple simultaneous instances | |
2896 // of the cache. | |
2897 TEST_F(DiskCacheTest, MultipleInstances) { | |
2898 base::ScopedTempDir store1, store2; | |
2899 ASSERT_TRUE(store1.CreateUniqueTempDir()); | |
2900 ASSERT_TRUE(store2.CreateUniqueTempDir()); | |
2901 | |
2902 base::Thread cache_thread("CacheThread"); | |
2903 ASSERT_TRUE(cache_thread.StartWithOptions( | |
2904 base::Thread::Options(base::MessageLoop::TYPE_IO, 0))); | |
2905 net::TestCompletionCallback cb; | |
2906 | |
2907 const int kNumberOfCaches = 2; | |
2908 scoped_ptr<disk_cache::Backend> cache[kNumberOfCaches]; | |
2909 | |
2910 int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE, | |
2911 net::CACHE_BACKEND_DEFAULT, | |
2912 store1.path(), | |
2913 0, | |
2914 false, | |
2915 cache_thread.task_runner(), | |
2916 NULL, | |
2917 &cache[0], | |
2918 cb.callback()); | |
2919 ASSERT_EQ(net::OK, cb.GetResult(rv)); | |
2920 rv = disk_cache::CreateCacheBackend(net::MEDIA_CACHE, | |
2921 net::CACHE_BACKEND_DEFAULT, | |
2922 store2.path(), | |
2923 0, | |
2924 false, | |
2925 cache_thread.task_runner(), | |
2926 NULL, | |
2927 &cache[1], | |
2928 cb.callback()); | |
2929 ASSERT_EQ(net::OK, cb.GetResult(rv)); | |
2930 | |
2931 ASSERT_TRUE(cache[0].get() != NULL && cache[1].get() != NULL); | |
2932 | |
2933 std::string key("the first key"); | |
2934 disk_cache::Entry* entry; | |
2935 for (int i = 0; i < kNumberOfCaches; i++) { | |
2936 rv = cache[i]->CreateEntry(key, &entry, cb.callback()); | |
2937 ASSERT_EQ(net::OK, cb.GetResult(rv)); | |
2938 entry->Close(); | |
2939 } | |
2940 } | |
2941 | |
2942 // Test the six regions of the curve that determines the max cache size. | |
2943 TEST_F(DiskCacheTest, AutomaticMaxSize) { | |
2944 using disk_cache::kDefaultCacheSize; | |
2945 int64 large_size = kDefaultCacheSize; | |
2946 | |
2947 // Region 1: expected = available * 0.8 | |
2948 EXPECT_EQ((kDefaultCacheSize - 1) * 8 / 10, | |
2949 disk_cache::PreferredCacheSize(large_size - 1)); | |
2950 EXPECT_EQ(kDefaultCacheSize * 8 / 10, | |
2951 disk_cache::PreferredCacheSize(large_size)); | |
2952 EXPECT_EQ(kDefaultCacheSize - 1, | |
2953 disk_cache::PreferredCacheSize(large_size * 10 / 8 - 1)); | |
2954 | |
2955 // Region 2: expected = default_size | |
2956 EXPECT_EQ(kDefaultCacheSize, | |
2957 disk_cache::PreferredCacheSize(large_size * 10 / 8)); | |
2958 EXPECT_EQ(kDefaultCacheSize, | |
2959 disk_cache::PreferredCacheSize(large_size * 10 - 1)); | |
2960 | |
2961 // Region 3: expected = available * 0.1 | |
2962 EXPECT_EQ(kDefaultCacheSize, | |
2963 disk_cache::PreferredCacheSize(large_size * 10)); | |
2964 EXPECT_EQ((kDefaultCacheSize * 25 - 1) / 10, | |
2965 disk_cache::PreferredCacheSize(large_size * 25 - 1)); | |
2966 | |
2967 // Region 4: expected = default_size * 2.5 | |
2968 EXPECT_EQ(kDefaultCacheSize * 25 / 10, | |
2969 disk_cache::PreferredCacheSize(large_size * 25)); | |
2970 EXPECT_EQ(kDefaultCacheSize * 25 / 10, | |
2971 disk_cache::PreferredCacheSize(large_size * 100 - 1)); | |
2972 EXPECT_EQ(kDefaultCacheSize * 25 / 10, | |
2973 disk_cache::PreferredCacheSize(large_size * 100)); | |
2974 EXPECT_EQ(kDefaultCacheSize * 25 / 10, | |
2975 disk_cache::PreferredCacheSize(large_size * 250 - 1)); | |
2976 | |
2977 // Region 5: expected = available * 0.1 | |
2978 int64 largest_size = kDefaultCacheSize * 4; | |
2979 EXPECT_EQ(kDefaultCacheSize * 25 / 10, | |
2980 disk_cache::PreferredCacheSize(large_size * 250)); | |
2981 EXPECT_EQ(largest_size - 1, | |
2982 disk_cache::PreferredCacheSize(largest_size * 100 - 1)); | |
2983 | |
2984 // Region 6: expected = largest possible size | |
2985 EXPECT_EQ(largest_size, | |
2986 disk_cache::PreferredCacheSize(largest_size * 100)); | |
2987 EXPECT_EQ(largest_size, | |
2988 disk_cache::PreferredCacheSize(largest_size * 10000)); | |
2989 } | |
2990 | |
2991 // Tests that we can "migrate" a running instance from one experiment group to | |
2992 // another. | |
2993 TEST_F(DiskCacheBackendTest, Histograms) { | |
2994 InitCache(); | |
2995   disk_cache::BackendImpl* backend_ = cache_impl_;  // Needed by the macro. | |
2996 | |
2997 for (int i = 1; i < 3; i++) { | |
2998 CACHE_UMA(HOURS, "FillupTime", i, 28); | |
2999 } | |
3000 } | |
3001 | |
3002 // Make sure that we keep the total memory used by the internal buffers under | |
3003 // control. | |
3004 TEST_F(DiskCacheBackendTest, TotalBuffersSize1) { | |
3005 InitCache(); | |
3006 std::string key("the first key"); | |
3007 disk_cache::Entry* entry; | |
3008 ASSERT_EQ(net::OK, CreateEntry(key, &entry)); | |
3009 | |
3010 const int kSize = 200; | |
3011 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize)); | |
3012 CacheTestFillBuffer(buffer->data(), kSize, true); | |
3013 | |
3014 for (int i = 0; i < 10; i++) { | |
3015 SCOPED_TRACE(i); | |
3016 // Allocate 2MB for this entry. | |
3017 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, true)); | |
3018 EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer.get(), kSize, true)); | |
3019 EXPECT_EQ(kSize, | |
3020 WriteData(entry, 0, 1024 * 1024, buffer.get(), kSize, false)); | |
3021 EXPECT_EQ(kSize, | |
3022 WriteData(entry, 1, 1024 * 1024, buffer.get(), kSize, false)); | |
3023 | |
3024 // Delete one of the buffers and truncate the other. | |
3025 EXPECT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, true)); | |
3026 EXPECT_EQ(0, WriteData(entry, 1, 10, buffer.get(), 0, true)); | |
3027 | |
3028 // Delete the second buffer, writing 10 bytes to disk. | |
3029 entry->Close(); | |
3030 ASSERT_EQ(net::OK, OpenEntry(key, &entry)); | |
3031 } | |
3032 | |
3033 entry->Close(); | |
3034 EXPECT_EQ(0, cache_impl_->GetTotalBuffersSize()); | |
3035 } | |
3036 | |
3037 // This test assumes at least 150MB of system memory. | |
3038 TEST_F(DiskCacheBackendTest, TotalBuffersSize2) { | |
3039 InitCache(); | |
3040 | |
3041 const int kOneMB = 1024 * 1024; | |
3042 EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB)); | |
3043 EXPECT_EQ(kOneMB, cache_impl_->GetTotalBuffersSize()); | |
3044 | |
3045 EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB)); | |
3046 EXPECT_EQ(kOneMB * 2, cache_impl_->GetTotalBuffersSize()); | |
3047 | |
3048 EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB)); | |
3049 EXPECT_EQ(kOneMB * 3, cache_impl_->GetTotalBuffersSize()); | |
3050 | |
3051 cache_impl_->BufferDeleted(kOneMB); | |
3052 EXPECT_EQ(kOneMB * 2, cache_impl_->GetTotalBuffersSize()); | |
3053 | |
3054 // Check the upper limit. | |
3055 EXPECT_FALSE(cache_impl_->IsAllocAllowed(0, 30 * kOneMB)); | |
3056 | |
3057 for (int i = 0; i < 30; i++) | |
3058 cache_impl_->IsAllocAllowed(0, kOneMB); // Ignore the result. | |
3059 | |
3060 EXPECT_FALSE(cache_impl_->IsAllocAllowed(0, kOneMB)); | |
3061 } | |
3062 | |
3063 // Tests that sharing of external files works and we are able to delete the | |
3064 // files when we need to. | |
3065 TEST_F(DiskCacheBackendTest, FileSharing) { | |
3066 InitCache(); | |
3067 | |
3068 disk_cache::Addr address(0x80000001); | |
3069 ASSERT_TRUE(cache_impl_->CreateExternalFile(&address)); | |
3070 base::FilePath name = cache_impl_->GetFileName(address); | |
3071 | |
3072 scoped_refptr<disk_cache::File> file(new disk_cache::File(false)); | |
3073 file->Init(name); | |
3074 | |
3075 #if defined(OS_WIN) | |
3076 DWORD sharing = FILE_SHARE_READ | FILE_SHARE_WRITE; | |
3077 DWORD access = GENERIC_READ | GENERIC_WRITE; | |
3078 base::win::ScopedHandle file2(CreateFile( | |
3079 name.value().c_str(), access, sharing, NULL, OPEN_EXISTING, 0, NULL)); | |
3080 EXPECT_FALSE(file2.IsValid()); | |
3081 | |
3082 sharing |= FILE_SHARE_DELETE; | |
3083 file2.Set(CreateFile(name.value().c_str(), access, sharing, NULL, | |
3084 OPEN_EXISTING, 0, NULL)); | |
3085 EXPECT_TRUE(file2.IsValid()); | |
3086 #endif | |
3087 | |
  EXPECT_TRUE(base::DeleteFile(name, false));

  // We should be able to use the file.
  const int kSize = 200;
  char buffer1[kSize];
  char buffer2[kSize];
  memset(buffer1, 't', kSize);
  memset(buffer2, 0, kSize);
  EXPECT_TRUE(file->Write(buffer1, kSize, 0));
  EXPECT_TRUE(file->Read(buffer2, kSize, 0));
  EXPECT_EQ(0, memcmp(buffer1, buffer2, kSize));

  EXPECT_TRUE(disk_cache::DeleteCacheFile(name));
}

TEST_F(DiskCacheBackendTest, UpdateRankForExternalCacheHit) {
  InitCache();

  disk_cache::Entry* entry;

  for (int i = 0; i < 2; ++i) {
    std::string key = base::StringPrintf("key%d", i);
    ASSERT_EQ(net::OK, CreateEntry(key, &entry));
    entry->Close();
  }

  // Ping the oldest entry.
  cache_->OnExternalCacheHit("key0");

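  // Trim a single entry. Since key0's rank was just refreshed, key1 should be
  // the one that gets evicted.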
  TrimForTest(false);

  // Make sure the older key remains.
  EXPECT_EQ(1, cache_->GetEntryCount());
  ASSERT_EQ(net::OK, OpenEntry("key0", &entry));
  entry->Close();
}

TEST_F(DiskCacheBackendTest, ShaderCacheUpdateRankForExternalCacheHit) {
  SetCacheType(net::SHADER_CACHE);
  InitCache();

  disk_cache::Entry* entry;

  for (int i = 0; i < 2; ++i) {
    std::string key = base::StringPrintf("key%d", i);
    ASSERT_EQ(net::OK, CreateEntry(key, &entry));
    entry->Close();
  }

  // Ping the oldest entry.
  cache_->OnExternalCacheHit("key0");

  TrimForTest(false);

  // Make sure the older key remains.
  EXPECT_EQ(1, cache_->GetEntryCount());
  ASSERT_EQ(net::OK, OpenEntry("key0", &entry));
  entry->Close();
}

TEST_F(DiskCacheBackendTest, SimpleCacheShutdownWithPendingCreate) {
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  BackendShutdownWithPendingCreate(false);
}

TEST_F(DiskCacheBackendTest, SimpleCacheShutdownWithPendingFileIO) {
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  BackendShutdownWithPendingFileIO(false);
}

TEST_F(DiskCacheBackendTest, SimpleCacheBasics) {
  SetSimpleCacheMode();
  BackendBasics();
}

TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheBasics) {
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  BackendBasics();
}

TEST_F(DiskCacheBackendTest, SimpleCacheKeying) {
  SetSimpleCacheMode();
  BackendKeying();
}

TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheKeying) {
  SetSimpleCacheMode();
  SetCacheType(net::APP_CACHE);
  BackendKeying();
}

TEST_F(DiskCacheBackendTest, DISABLED_SimpleCacheSetSize) {
  SetSimpleCacheMode();
  BackendSetSize();
}

// MacOS has a default open file limit of 256 files, which is incompatible with
// this simple cache test.
#if defined(OS_MACOSX)
#define SIMPLE_MAYBE_MACOS(TestName) DISABLED_ ## TestName
#else
#define SIMPLE_MAYBE_MACOS(TestName) TestName
#endif

TEST_F(DiskCacheBackendTest, SIMPLE_MAYBE_MACOS(SimpleCacheLoad)) {
  SetMaxSize(0x100000);
  SetSimpleCacheMode();
  BackendLoad();
}

TEST_F(DiskCacheBackendTest, SIMPLE_MAYBE_MACOS(SimpleCacheAppCacheLoad)) {
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  SetMaxSize(0x100000);
  BackendLoad();
}

TEST_F(DiskCacheBackendTest, SimpleDoomRecent) {
  SetSimpleCacheMode();
  BackendDoomRecent();
}

// crbug.com/330926, crbug.com/370677
TEST_F(DiskCacheBackendTest, DISABLED_SimpleDoomBetween) {
  SetSimpleCacheMode();
  BackendDoomBetween();
}

TEST_F(DiskCacheBackendTest, SimpleCacheDoomAll) {
  SetSimpleCacheMode();
  BackendDoomAll();
}

TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheOnlyDoomAll) {
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  BackendDoomAll();
}

TEST_F(DiskCacheBackendTest, SimpleCacheOpenMissingFile) {
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "the first key";
  disk_cache::Entry* entry = NULL;

  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  ASSERT_TRUE(entry != NULL);
  entry->Close();
  entry = NULL;

  // To make sure the file creation completed, open the entry again; this
  // blocks until the files have actually been created.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  ASSERT_TRUE(entry != NULL);
  entry->Close();
  entry = NULL;

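  // Each simple-cache entry is stored as kSimpleEntryFileCount separate files,
  // each named from the key plus a file index.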
  // Delete one of the files in the entry.
  base::FilePath to_delete_file = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
  EXPECT_TRUE(base::PathExists(to_delete_file));
  EXPECT_TRUE(disk_cache::DeleteCacheFile(to_delete_file));

  // Failing to open the entry should delete the rest of these files.
  ASSERT_EQ(net::ERR_FAILED, OpenEntry(key, &entry));

  // Confirm the rest of the files are gone.
  for (int i = 1; i < disk_cache::kSimpleEntryFileCount; ++i) {
    base::FilePath should_be_gone_file(cache_path_.AppendASCII(
        disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, i)));
    EXPECT_FALSE(base::PathExists(should_be_gone_file));
  }
}

TEST_F(DiskCacheBackendTest, SimpleCacheOpenBadFile) {
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "the first key";
  disk_cache::Entry* entry = NULL;

  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  disk_cache::Entry* null = NULL;
  ASSERT_NE(null, entry);
  entry->Close();
  entry = NULL;

  // To make sure the file creation completed, open the entry again; this
  // blocks until the files have actually been created.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  ASSERT_NE(null, entry);
  entry->Close();
  entry = NULL;

  // The entry is being closed on the Simple Cache worker pool, so flush the
  // pool and run pending tasks to make sure the close has finished.
  disk_cache::SimpleBackendImpl::FlushWorkerPoolForTesting();
  base::RunLoop().RunUntilIdle();

  // Write an invalid header for stream 0 and stream 1.
  base::FilePath entry_file1_path = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));

  disk_cache::SimpleFileHeader header;
  header.initial_magic_number = GG_UINT64_C(0xbadf00d);
  EXPECT_EQ(
      implicit_cast<int>(sizeof(header)),
      base::WriteFile(entry_file1_path, reinterpret_cast<char*>(&header),
                      sizeof(header)));
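  // The bogus magic number should make the header check fail, so opening the
  // entry is expected to report an error.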
  ASSERT_EQ(net::ERR_FAILED, OpenEntry(key, &entry));
}

// Tests that the Simple Cache Backend fails to initialize with a non-matching
// file structure on disk.
TEST_F(DiskCacheBackendTest, SimpleCacheOverBlockfileCache) {
  // Create a cache structure with the |BackendImpl|.
  InitCache();
  disk_cache::Entry* entry;
  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer->data(), kSize, false);
  ASSERT_EQ(net::OK, CreateEntry("key", &entry));
  ASSERT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, false));
  entry->Close();
  cache_.reset();

  // Check that the |SimpleBackendImpl| refuses to initialize on top of this
  // structure.
  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
  disk_cache::SimpleBackendImpl* simple_cache =
      new disk_cache::SimpleBackendImpl(
          cache_path_, 0, net::DISK_CACHE, cache_thread.task_runner(), NULL);
  net::TestCompletionCallback cb;
  int rv = simple_cache->Init(cb.callback());
  EXPECT_NE(net::OK, cb.GetResult(rv));
  delete simple_cache;
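  // The cache directory was left in a deliberately mismatched state;
  // presumably this is why the fixture's integrity check is disabled here.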
  DisableIntegrityCheck();
}

// Tests that the |BackendImpl| refuses to initialize on top of the files
// generated by the Simple Cache Backend.
TEST_F(DiskCacheBackendTest, BlockfileCacheOverSimpleCache) {
  // Create a cache structure with the |SimpleBackendImpl|.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* entry;
  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer->data(), kSize, false);
  ASSERT_EQ(net::OK, CreateEntry("key", &entry));
  ASSERT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, false));
  entry->Close();
  cache_.reset();

  // Check that the |BackendImpl| refuses to initialize on top of this
  // structure.
  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
  disk_cache::BackendImpl* cache = new disk_cache::BackendImpl(
      cache_path_, base::ThreadTaskRunnerHandle::Get(), NULL);
  cache->SetUnitTestMode();
  net::TestCompletionCallback cb;
  int rv = cache->Init(cb.callback());
  EXPECT_NE(net::OK, cb.GetResult(rv));
  delete cache;
  DisableIntegrityCheck();
}

TEST_F(DiskCacheBackendTest, SimpleCacheFixEnumerators) {
  SetSimpleCacheMode();
  BackendFixEnumerators();
}

// Tests basic functionality of the SimpleBackend implementation of the
// enumeration API.
TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationBasics) {
  SetSimpleCacheMode();
  InitCache();
  std::set<std::string> key_pool;
  ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));

  // Check that enumeration returns all entries.
  std::set<std::string> keys_to_match(key_pool);
  scoped_ptr<TestIterator> iter = CreateIterator();
  size_t count = 0;
  ASSERT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &keys_to_match, &count));
  iter.reset();
  EXPECT_EQ(key_pool.size(), count);
  EXPECT_TRUE(keys_to_match.empty());

  // Check that opening entries does not affect enumeration.
  keys_to_match = key_pool;
  iter = CreateIterator();
  count = 0;
  disk_cache::Entry* entry_opened_before;
  ASSERT_EQ(net::OK, OpenEntry(*(key_pool.begin()), &entry_opened_before));
  ASSERT_TRUE(EnumerateAndMatchKeys(key_pool.size()/2,
                                    iter.get(),
                                    &keys_to_match,
                                    &count));

  disk_cache::Entry* entry_opened_middle;
  ASSERT_EQ(net::OK,
            OpenEntry(*(keys_to_match.begin()), &entry_opened_middle));
  ASSERT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &keys_to_match, &count));
  iter.reset();
  entry_opened_before->Close();
  entry_opened_middle->Close();

  EXPECT_EQ(key_pool.size(), count);
  EXPECT_TRUE(keys_to_match.empty());
}

// Tests that the enumerations are not affected by dooming an entry in the
// middle.
TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationWhileDoomed) {
  SetSimpleCacheMode();
  InitCache();
  std::set<std::string> key_pool;
  ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));

  // Check that enumeration returns all entries but the doomed one.
  std::set<std::string> keys_to_match(key_pool);
  scoped_ptr<TestIterator> iter = CreateIterator();
  size_t count = 0;
  ASSERT_TRUE(EnumerateAndMatchKeys(key_pool.size()/2,
                                    iter.get(),
                                    &keys_to_match,
                                    &count));

  std::string key_to_delete = *(keys_to_match.begin());
  DoomEntry(key_to_delete);
  keys_to_match.erase(key_to_delete);
  key_pool.erase(key_to_delete);
  ASSERT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &keys_to_match, &count));
  iter.reset();

  EXPECT_EQ(key_pool.size(), count);
  EXPECT_TRUE(keys_to_match.empty());
}

// Tests that enumerations are not affected by corrupt files.
TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationCorruption) {
  SetSimpleCacheMode();
  InitCache();
  std::set<std::string> key_pool;
  ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));

  // Create a corrupt entry. The write/read sequence ensures that the entry has
  // actually been created before the platform files are corrupted, even when
  // operations are optimistic.
  const std::string key = "the key";
  disk_cache::Entry* corrupted_entry;

  ASSERT_EQ(net::OK, CreateEntry(key, &corrupted_entry));
  ASSERT_TRUE(corrupted_entry);
  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer->data(), kSize, false);
  ASSERT_EQ(kSize,
            WriteData(corrupted_entry, 0, 0, buffer.get(), kSize, false));
  ASSERT_EQ(kSize, ReadData(corrupted_entry, 0, 0, buffer.get(), kSize));
  corrupted_entry->Close();

  EXPECT_TRUE(disk_cache::simple_util::CreateCorruptFileForTests(
      key, cache_path_));
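  // The corrupt entry still counts toward the entry count, but the enumeration
  // below is expected to skip it.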
  EXPECT_EQ(key_pool.size() + 1,
            implicit_cast<size_t>(cache_->GetEntryCount()));

  // Check that enumeration returns all entries but the corrupt one.
  std::set<std::string> keys_to_match(key_pool);
  scoped_ptr<TestIterator> iter = CreateIterator();
  size_t count = 0;
  ASSERT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &keys_to_match, &count));
  iter.reset();

  EXPECT_EQ(key_pool.size(), count);
  EXPECT_TRUE(keys_to_match.empty());
}

// Tests that enumerations don't leak memory when the backend is destructed
// mid-enumeration.
TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationDestruction) {
  SetSimpleCacheMode();
  InitCache();
  std::set<std::string> key_pool;
  ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));

  scoped_ptr<TestIterator> iter = CreateIterator();
  disk_cache::Entry* entry = NULL;
  ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
  EXPECT_TRUE(entry);
  disk_cache::ScopedEntryPtr entry_closer(entry);

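  // Destroy the backend while the iterator and an open entry are still alive.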
  cache_.reset();
  // This test passes if we don't leak memory.
}

// Tests that a SimpleCache doesn't crash when files are deleted very quickly
// after closing.
// NOTE: IF THIS TEST IS FLAKY THEN IT IS FAILING. See https://crbug.com/416940
TEST_F(DiskCacheBackendTest, SimpleCacheDeleteQuickly) {
  SetSimpleCacheMode();
  for (int i = 0; i < 100; ++i) {
    InitCache();
    cache_.reset();
    EXPECT_TRUE(CleanupCacheDir());
  }
}