| OLD | NEW |
| (Empty) |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 #include "base/basictypes.h" | |
| 6 #include "base/file_util.h" | |
| 7 #include "base/metrics/field_trial.h" | |
| 8 #include "base/port.h" | |
| 9 #include "base/strings/string_util.h" | |
| 10 #include "base/strings/stringprintf.h" | |
| 11 #include "base/third_party/dynamic_annotations/dynamic_annotations.h" | |
| 12 #include "base/threading/platform_thread.h" | |
| 13 #include "base/threading/thread_restrictions.h" | |
| 14 #include "net/base/cache_type.h" | |
| 15 #include "net/base/io_buffer.h" | |
| 16 #include "net/base/net_errors.h" | |
| 17 #include "net/base/test_completion_callback.h" | |
| 18 #include "net/disk_cache/blockfile/backend_impl.h" | |
| 19 #include "net/disk_cache/blockfile/entry_impl.h" | |
| 20 #include "net/disk_cache/blockfile/experiments.h" | |
| 21 #include "net/disk_cache/blockfile/mapped_file.h" | |
| 22 #include "net/disk_cache/cache_util.h" | |
| 23 #include "net/disk_cache/disk_cache_test_base.h" | |
| 24 #include "net/disk_cache/disk_cache_test_util.h" | |
| 25 #include "net/disk_cache/memory/mem_backend_impl.h" | |
| 26 #include "net/disk_cache/simple/simple_backend_impl.h" | |
| 27 #include "net/disk_cache/simple/simple_entry_format.h" | |
| 28 #include "net/disk_cache/simple/simple_test_util.h" | |
| 29 #include "net/disk_cache/simple/simple_util.h" | |
| 30 #include "net/disk_cache/tracing/tracing_cache_backend.h" | |
| 31 #include "testing/gtest/include/gtest/gtest.h" | |
| 32 | |
| 33 #if defined(OS_WIN) | |
| 34 #include "base/win/scoped_handle.h" | |
| 35 #endif | |
| 36 | |
| 37 // Define BLOCKFILE_BACKEND_IMPL_OBJ to be a disk_cache::BackendImpl* in order | |
| 38 // to use the CACHE_UMA histogram macro. | |
| 39 #define BLOCKFILE_BACKEND_IMPL_OBJ backend_ | |
| 40 #include "net/disk_cache/blockfile/histogram_macros.h" | |
| 41 | |
| 42 using base::Time; | |
| 43 | |
| 44 namespace { | |
| 45 | |
| 46 const char kExistingEntryKey[] = "existing entry key"; | |
| 47 | |
| 48 scoped_ptr<disk_cache::BackendImpl> CreateExistingEntryCache( | |
| 49 const base::Thread& cache_thread, | |
| 50 base::FilePath& cache_path) { | |
| 51 net::TestCompletionCallback cb; | |
| 52 | |
| 53 scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl( | |
| 54 cache_path, cache_thread.message_loop_proxy(), NULL)); | |
| 55 int rv = cache->Init(cb.callback()); | |
| 56 if (cb.GetResult(rv) != net::OK) | |
| 57 return scoped_ptr<disk_cache::BackendImpl>(); | |
| 58 | |
| 59 disk_cache::Entry* entry = NULL; | |
| 60 rv = cache->CreateEntry(kExistingEntryKey, &entry, cb.callback()); | |
| 61 if (cb.GetResult(rv) != net::OK) | |
| 62 return scoped_ptr<disk_cache::BackendImpl>(); | |
| 63 entry->Close(); | |
| 64 | |
| 65 return cache.Pass(); | |
| 66 } | |
| 67 | |
| 68 } // namespace | |
| 69 | |
| 70 // Tests that can run with different types of caches. | |
| 71 class DiskCacheBackendTest : public DiskCacheTestWithCache { | |
| 72 protected: | |
| 73 // Some utility methods: | |
| 74 | |
| 75 // Perform IO operations on the cache until there is pending IO. | |
| 76 int GeneratePendingIO(net::TestCompletionCallback* cb); | |
| 77 | |
| 78 // Adds 5 sparse entries. |doomed_start| and |doomed_end| if not NULL, | |
| 79 // will be filled with times, used by DoomEntriesSince and DoomEntriesBetween. | |
| 80 // There are 4 entries after doomed_start and 2 after doomed_end. | |
| 81 void InitSparseCache(base::Time* doomed_start, base::Time* doomed_end); | |
| 82 | |
| 83 bool CreateSetOfRandomEntries(std::set<std::string>* key_pool); | |
| 84 bool EnumerateAndMatchKeys(int max_to_open, | |
| 85 void** iter, | |
| 86 std::set<std::string>* keys_to_match, | |
| 87 size_t* count); | |
| 88 | |
| 89 // Actual tests: | |
| 90 void BackendBasics(); | |
| 91 void BackendKeying(); | |
| 92 void BackendShutdownWithPendingFileIO(bool fast); | |
| 93 void BackendShutdownWithPendingIO(bool fast); | |
| 94 void BackendShutdownWithPendingCreate(bool fast); | |
| 95 void BackendSetSize(); | |
| 96 void BackendLoad(); | |
| 97 void BackendChain(); | |
| 98 void BackendValidEntry(); | |
| 99 void BackendInvalidEntry(); | |
| 100 void BackendInvalidEntryRead(); | |
| 101 void BackendInvalidEntryWithLoad(); | |
| 102 void BackendTrimInvalidEntry(); | |
| 103 void BackendTrimInvalidEntry2(); | |
| 104 void BackendEnumerations(); | |
| 105 void BackendEnumerations2(); | |
| 106 void BackendInvalidEntryEnumeration(); | |
| 107 void BackendFixEnumerators(); | |
| 108 void BackendDoomRecent(); | |
| 109 void BackendDoomBetween(); | |
| 110 void BackendTransaction(const std::string& name, int num_entries, bool load); | |
| 111 void BackendRecoverInsert(); | |
| 112 void BackendRecoverRemove(); | |
| 113 void BackendRecoverWithEviction(); | |
| 114 void BackendInvalidEntry2(); | |
| 115 void BackendInvalidEntry3(); | |
| 116 void BackendInvalidEntry7(); | |
| 117 void BackendInvalidEntry8(); | |
| 118 void BackendInvalidEntry9(bool eviction); | |
| 119 void BackendInvalidEntry10(bool eviction); | |
| 120 void BackendInvalidEntry11(bool eviction); | |
| 121 void BackendTrimInvalidEntry12(); | |
| 122 void BackendDoomAll(); | |
| 123 void BackendDoomAll2(); | |
| 124 void BackendInvalidRankings(); | |
| 125 void BackendInvalidRankings2(); | |
| 126 void BackendDisable(); | |
| 127 void BackendDisable2(); | |
| 128 void BackendDisable3(); | |
| 129 void BackendDisable4(); | |
| 130 void TracingBackendBasics(); | |
| 131 }; | |
| 132 | |
| 133 int DiskCacheBackendTest::GeneratePendingIO(net::TestCompletionCallback* cb) { | |
| 134 if (!use_current_thread_) { | |
| 135 ADD_FAILURE(); | |
| 136 return net::ERR_FAILED; | |
| 137 } | |
| 138 | |
| 139 disk_cache::Entry* entry; | |
| 140 int rv = cache_->CreateEntry("some key", &entry, cb->callback()); | |
| 141 if (cb->GetResult(rv) != net::OK) | |
| 142 return net::ERR_CACHE_CREATE_FAILURE; | |
| 143 | |
| 144 const int kSize = 25000; | |
| 145 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize)); | |
| 146 CacheTestFillBuffer(buffer->data(), kSize, false); | |
| 147 | |
| 148 for (int i = 0; i < 10 * 1024 * 1024; i += 64 * 1024) { | |
| 149 // We are using the current thread as the cache thread because we want to | |
| 150 // be able to call directly this method to make sure that the OS (instead | |
| 151 // of us switching thread) is returning IO pending. | |
| 152 if (!simple_cache_mode_) { | |
| 153 rv = static_cast<disk_cache::EntryImpl*>(entry)->WriteDataImpl( | |
| 154 0, i, buffer.get(), kSize, cb->callback(), false); | |
| 155 } else { | |
| 156 rv = entry->WriteData(0, i, buffer.get(), kSize, cb->callback(), false); | |
| 157 } | |
| 158 | |
| 159 if (rv == net::ERR_IO_PENDING) | |
| 160 break; | |
| 161 if (rv != kSize) | |
| 162 rv = net::ERR_FAILED; | |
| 163 } | |
| 164 | |
| 165 // Don't call Close() to avoid going through the queue or we'll deadlock | |
| 166 // waiting for the operation to finish. | |
| 167 if (!simple_cache_mode_) | |
| 168 static_cast<disk_cache::EntryImpl*>(entry)->Release(); | |
| 169 else | |
| 170 entry->Close(); | |
| 171 | |
| 172 return rv; | |
| 173 } | |
| 174 | |
| 175 void DiskCacheBackendTest::InitSparseCache(base::Time* doomed_start, | |
| 176 base::Time* doomed_end) { | |
| 177 InitCache(); | |
| 178 | |
| 179 const int kSize = 50; | |
| 180 // This must be greater then MemEntryImpl::kMaxSparseEntrySize. | |
| 181 const int kOffset = 10 + 1024 * 1024; | |
| 182 | |
| 183 disk_cache::Entry* entry0 = NULL; | |
| 184 disk_cache::Entry* entry1 = NULL; | |
| 185 disk_cache::Entry* entry2 = NULL; | |
| 186 | |
| 187 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize)); | |
| 188 CacheTestFillBuffer(buffer->data(), kSize, false); | |
| 189 | |
| 190 ASSERT_EQ(net::OK, CreateEntry("zeroth", &entry0)); | |
| 191 ASSERT_EQ(kSize, WriteSparseData(entry0, 0, buffer.get(), kSize)); | |
| 192 ASSERT_EQ(kSize, | |
| 193 WriteSparseData(entry0, kOffset + kSize, buffer.get(), kSize)); | |
| 194 entry0->Close(); | |
| 195 | |
| 196 FlushQueueForTest(); | |
| 197 AddDelay(); | |
| 198 if (doomed_start) | |
| 199 *doomed_start = base::Time::Now(); | |
| 200 | |
| 201 // Order in rankings list: | |
| 202 // first_part1, first_part2, second_part1, second_part2 | |
| 203 ASSERT_EQ(net::OK, CreateEntry("first", &entry1)); | |
| 204 ASSERT_EQ(kSize, WriteSparseData(entry1, 0, buffer.get(), kSize)); | |
| 205 ASSERT_EQ(kSize, | |
| 206 WriteSparseData(entry1, kOffset + kSize, buffer.get(), kSize)); | |
| 207 entry1->Close(); | |
| 208 | |
| 209 ASSERT_EQ(net::OK, CreateEntry("second", &entry2)); | |
| 210 ASSERT_EQ(kSize, WriteSparseData(entry2, 0, buffer.get(), kSize)); | |
| 211 ASSERT_EQ(kSize, | |
| 212 WriteSparseData(entry2, kOffset + kSize, buffer.get(), kSize)); | |
| 213 entry2->Close(); | |
| 214 | |
| 215 FlushQueueForTest(); | |
| 216 AddDelay(); | |
| 217 if (doomed_end) | |
| 218 *doomed_end = base::Time::Now(); | |
| 219 | |
| 220 // Order in rankings list: | |
| 221 // third_part1, fourth_part1, third_part2, fourth_part2 | |
| 222 disk_cache::Entry* entry3 = NULL; | |
| 223 disk_cache::Entry* entry4 = NULL; | |
| 224 ASSERT_EQ(net::OK, CreateEntry("third", &entry3)); | |
| 225 ASSERT_EQ(kSize, WriteSparseData(entry3, 0, buffer.get(), kSize)); | |
| 226 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry4)); | |
| 227 ASSERT_EQ(kSize, WriteSparseData(entry4, 0, buffer.get(), kSize)); | |
| 228 ASSERT_EQ(kSize, | |
| 229 WriteSparseData(entry3, kOffset + kSize, buffer.get(), kSize)); | |
| 230 ASSERT_EQ(kSize, | |
| 231 WriteSparseData(entry4, kOffset + kSize, buffer.get(), kSize)); | |
| 232 entry3->Close(); | |
| 233 entry4->Close(); | |
| 234 | |
| 235 FlushQueueForTest(); | |
| 236 AddDelay(); | |
| 237 } | |
| 238 | |
| 239 // Creates entries based on random keys. Stores these keys in |key_pool|. | |
| 240 bool DiskCacheBackendTest::CreateSetOfRandomEntries( | |
| 241 std::set<std::string>* key_pool) { | |
| 242 const int kNumEntries = 10; | |
| 243 | |
| 244 for (int i = 0; i < kNumEntries; ++i) { | |
| 245 std::string key = GenerateKey(true); | |
| 246 disk_cache::Entry* entry; | |
| 247 if (CreateEntry(key, &entry) != net::OK) | |
| 248 return false; | |
| 249 key_pool->insert(key); | |
| 250 entry->Close(); | |
| 251 } | |
| 252 return key_pool->size() == implicit_cast<size_t>(cache_->GetEntryCount()); | |
| 253 } | |
| 254 | |
| 255 // Performs iteration over the backend and checks that the keys of entries | |
| 256 // opened are in |keys_to_match|, then erases them. Up to |max_to_open| entries | |
| 257 // will be opened, if it is positive. Otherwise, iteration will continue until | |
| 258 // OpenNextEntry stops returning net::OK. | |
| 259 bool DiskCacheBackendTest::EnumerateAndMatchKeys( | |
| 260 int max_to_open, | |
| 261 void** iter, | |
| 262 std::set<std::string>* keys_to_match, | |
| 263 size_t* count) { | |
| 264 disk_cache::Entry* entry; | |
| 265 | |
| 266 while (OpenNextEntry(iter, &entry) == net::OK) { | |
| 267 if (!entry) | |
| 268 return false; | |
| 269 EXPECT_EQ(1U, keys_to_match->erase(entry->GetKey())); | |
| 270 entry->Close(); | |
| 271 ++(*count); | |
| 272 if (max_to_open >= 0 && implicit_cast<int>(*count) >= max_to_open) | |
| 273 break; | |
| 274 }; | |
| 275 | |
| 276 return true; | |
| 277 } | |
| 278 | |
| 279 void DiskCacheBackendTest::BackendBasics() { | |
| 280 InitCache(); | |
| 281 disk_cache::Entry *entry1 = NULL, *entry2 = NULL; | |
| 282 EXPECT_NE(net::OK, OpenEntry("the first key", &entry1)); | |
| 283 ASSERT_EQ(net::OK, CreateEntry("the first key", &entry1)); | |
| 284 ASSERT_TRUE(NULL != entry1); | |
| 285 entry1->Close(); | |
| 286 entry1 = NULL; | |
| 287 | |
| 288 ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1)); | |
| 289 ASSERT_TRUE(NULL != entry1); | |
| 290 entry1->Close(); | |
| 291 entry1 = NULL; | |
| 292 | |
| 293 EXPECT_NE(net::OK, CreateEntry("the first key", &entry1)); | |
| 294 ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1)); | |
| 295 EXPECT_NE(net::OK, OpenEntry("some other key", &entry2)); | |
| 296 ASSERT_EQ(net::OK, CreateEntry("some other key", &entry2)); | |
| 297 ASSERT_TRUE(NULL != entry1); | |
| 298 ASSERT_TRUE(NULL != entry2); | |
| 299 EXPECT_EQ(2, cache_->GetEntryCount()); | |
| 300 | |
| 301 disk_cache::Entry* entry3 = NULL; | |
| 302 ASSERT_EQ(net::OK, OpenEntry("some other key", &entry3)); | |
| 303 ASSERT_TRUE(NULL != entry3); | |
| 304 EXPECT_TRUE(entry2 == entry3); | |
| 305 EXPECT_EQ(2, cache_->GetEntryCount()); | |
| 306 | |
| 307 EXPECT_EQ(net::OK, DoomEntry("some other key")); | |
| 308 EXPECT_EQ(1, cache_->GetEntryCount()); | |
| 309 entry1->Close(); | |
| 310 entry2->Close(); | |
| 311 entry3->Close(); | |
| 312 | |
| 313 EXPECT_EQ(net::OK, DoomEntry("the first key")); | |
| 314 EXPECT_EQ(0, cache_->GetEntryCount()); | |
| 315 | |
| 316 ASSERT_EQ(net::OK, CreateEntry("the first key", &entry1)); | |
| 317 ASSERT_EQ(net::OK, CreateEntry("some other key", &entry2)); | |
| 318 entry1->Doom(); | |
| 319 entry1->Close(); | |
| 320 EXPECT_EQ(net::OK, DoomEntry("some other key")); | |
| 321 EXPECT_EQ(0, cache_->GetEntryCount()); | |
| 322 entry2->Close(); | |
| 323 } | |
| 324 | |
| 325 TEST_F(DiskCacheBackendTest, Basics) { | |
| 326 BackendBasics(); | |
| 327 } | |
| 328 | |
| 329 TEST_F(DiskCacheBackendTest, NewEvictionBasics) { | |
| 330 SetNewEviction(); | |
| 331 BackendBasics(); | |
| 332 } | |
| 333 | |
| 334 TEST_F(DiskCacheBackendTest, MemoryOnlyBasics) { | |
| 335 SetMemoryOnlyMode(); | |
| 336 BackendBasics(); | |
| 337 } | |
| 338 | |
| 339 TEST_F(DiskCacheBackendTest, AppCacheBasics) { | |
| 340 SetCacheType(net::APP_CACHE); | |
| 341 BackendBasics(); | |
| 342 } | |
| 343 | |
| 344 TEST_F(DiskCacheBackendTest, ShaderCacheBasics) { | |
| 345 SetCacheType(net::SHADER_CACHE); | |
| 346 BackendBasics(); | |
| 347 } | |
| 348 | |
| 349 void DiskCacheBackendTest::BackendKeying() { | |
| 350 InitCache(); | |
| 351 const char* kName1 = "the first key"; | |
| 352 const char* kName2 = "the first Key"; | |
| 353 disk_cache::Entry *entry1, *entry2; | |
| 354 ASSERT_EQ(net::OK, CreateEntry(kName1, &entry1)); | |
| 355 | |
| 356 ASSERT_EQ(net::OK, CreateEntry(kName2, &entry2)); | |
| 357 EXPECT_TRUE(entry1 != entry2) << "Case sensitive"; | |
| 358 entry2->Close(); | |
| 359 | |
| 360 char buffer[30]; | |
| 361 base::strlcpy(buffer, kName1, arraysize(buffer)); | |
| 362 ASSERT_EQ(net::OK, OpenEntry(buffer, &entry2)); | |
| 363 EXPECT_TRUE(entry1 == entry2); | |
| 364 entry2->Close(); | |
| 365 | |
| 366 base::strlcpy(buffer + 1, kName1, arraysize(buffer) - 1); | |
| 367 ASSERT_EQ(net::OK, OpenEntry(buffer + 1, &entry2)); | |
| 368 EXPECT_TRUE(entry1 == entry2); | |
| 369 entry2->Close(); | |
| 370 | |
| 371 base::strlcpy(buffer + 3, kName1, arraysize(buffer) - 3); | |
| 372 ASSERT_EQ(net::OK, OpenEntry(buffer + 3, &entry2)); | |
| 373 EXPECT_TRUE(entry1 == entry2); | |
| 374 entry2->Close(); | |
| 375 | |
| 376 // Now verify long keys. | |
| 377 char buffer2[20000]; | |
| 378 memset(buffer2, 's', sizeof(buffer2)); | |
| 379 buffer2[1023] = '\0'; | |
| 380 ASSERT_EQ(net::OK, CreateEntry(buffer2, &entry2)) << "key on block file"; | |
| 381 entry2->Close(); | |
| 382 | |
| 383 buffer2[1023] = 'g'; | |
| 384 buffer2[19999] = '\0'; | |
| 385 ASSERT_EQ(net::OK, CreateEntry(buffer2, &entry2)) << "key on external file"; | |
| 386 entry2->Close(); | |
| 387 entry1->Close(); | |
| 388 } | |
| 389 | |
| 390 TEST_F(DiskCacheBackendTest, Keying) { | |
| 391 BackendKeying(); | |
| 392 } | |
| 393 | |
| 394 TEST_F(DiskCacheBackendTest, NewEvictionKeying) { | |
| 395 SetNewEviction(); | |
| 396 BackendKeying(); | |
| 397 } | |
| 398 | |
| 399 TEST_F(DiskCacheBackendTest, MemoryOnlyKeying) { | |
| 400 SetMemoryOnlyMode(); | |
| 401 BackendKeying(); | |
| 402 } | |
| 403 | |
| 404 TEST_F(DiskCacheBackendTest, AppCacheKeying) { | |
| 405 SetCacheType(net::APP_CACHE); | |
| 406 BackendKeying(); | |
| 407 } | |
| 408 | |
| 409 TEST_F(DiskCacheBackendTest, ShaderCacheKeying) { | |
| 410 SetCacheType(net::SHADER_CACHE); | |
| 411 BackendKeying(); | |
| 412 } | |
| 413 | |
| 414 TEST_F(DiskCacheTest, CreateBackend) { | |
| 415 net::TestCompletionCallback cb; | |
| 416 | |
| 417 { | |
| 418 ASSERT_TRUE(CleanupCacheDir()); | |
| 419 base::Thread cache_thread("CacheThread"); | |
| 420 ASSERT_TRUE(cache_thread.StartWithOptions( | |
| 421 base::Thread::Options(base::MessageLoop::TYPE_IO, 0))); | |
| 422 | |
| 423 // Test the private factory method(s). | |
| 424 scoped_ptr<disk_cache::Backend> cache; | |
| 425 cache = disk_cache::MemBackendImpl::CreateBackend(0, NULL); | |
| 426 ASSERT_TRUE(cache.get()); | |
| 427 cache.reset(); | |
| 428 | |
| 429 // Now test the public API. | |
| 430 int rv = | |
| 431 disk_cache::CreateCacheBackend(net::DISK_CACHE, | |
| 432 net::CACHE_BACKEND_DEFAULT, | |
| 433 cache_path_, | |
| 434 0, | |
| 435 false, | |
| 436 cache_thread.message_loop_proxy().get(), | |
| 437 NULL, | |
| 438 &cache, | |
| 439 cb.callback()); | |
| 440 ASSERT_EQ(net::OK, cb.GetResult(rv)); | |
| 441 ASSERT_TRUE(cache.get()); | |
| 442 cache.reset(); | |
| 443 | |
| 444 rv = disk_cache::CreateCacheBackend(net::MEMORY_CACHE, | |
| 445 net::CACHE_BACKEND_DEFAULT, | |
| 446 base::FilePath(), 0, | |
| 447 false, NULL, NULL, &cache, | |
| 448 cb.callback()); | |
| 449 ASSERT_EQ(net::OK, cb.GetResult(rv)); | |
| 450 ASSERT_TRUE(cache.get()); | |
| 451 cache.reset(); | |
| 452 } | |
| 453 | |
| 454 base::MessageLoop::current()->RunUntilIdle(); | |
| 455 } | |
| 456 | |
| 457 // Tests that |BackendImpl| fails to initialize with a missing file. | |
| 458 TEST_F(DiskCacheBackendTest, CreateBackend_MissingFile) { | |
| 459 ASSERT_TRUE(CopyTestCache("bad_entry")); | |
| 460 base::FilePath filename = cache_path_.AppendASCII("data_1"); | |
| 461 base::DeleteFile(filename, false); | |
| 462 base::Thread cache_thread("CacheThread"); | |
| 463 ASSERT_TRUE(cache_thread.StartWithOptions( | |
| 464 base::Thread::Options(base::MessageLoop::TYPE_IO, 0))); | |
| 465 net::TestCompletionCallback cb; | |
| 466 | |
| 467 bool prev = base::ThreadRestrictions::SetIOAllowed(false); | |
| 468 scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl( | |
| 469 cache_path_, cache_thread.message_loop_proxy().get(), NULL)); | |
| 470 int rv = cache->Init(cb.callback()); | |
| 471 EXPECT_EQ(net::ERR_FAILED, cb.GetResult(rv)); | |
| 472 base::ThreadRestrictions::SetIOAllowed(prev); | |
| 473 | |
| 474 cache.reset(); | |
| 475 DisableIntegrityCheck(); | |
| 476 } | |
| 477 | |
| 478 TEST_F(DiskCacheBackendTest, ExternalFiles) { | |
| 479 InitCache(); | |
| 480 // First, let's create a file on the folder. | |
| 481 base::FilePath filename = cache_path_.AppendASCII("f_000001"); | |
| 482 | |
| 483 const int kSize = 50; | |
| 484 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize)); | |
| 485 CacheTestFillBuffer(buffer1->data(), kSize, false); | |
| 486 ASSERT_EQ(kSize, file_util::WriteFile(filename, buffer1->data(), kSize)); | |
| 487 | |
| 488 // Now let's create a file with the cache. | |
| 489 disk_cache::Entry* entry; | |
| 490 ASSERT_EQ(net::OK, CreateEntry("key", &entry)); | |
| 491 ASSERT_EQ(0, WriteData(entry, 0, 20000, buffer1.get(), 0, false)); | |
| 492 entry->Close(); | |
| 493 | |
| 494 // And verify that the first file is still there. | |
| 495 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize)); | |
| 496 ASSERT_EQ(kSize, base::ReadFile(filename, buffer2->data(), kSize)); | |
| 497 EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kSize)); | |
| 498 } | |
| 499 | |
| 500 // Tests that we deal with file-level pending operations at destruction time. | |
| 501 void DiskCacheBackendTest::BackendShutdownWithPendingFileIO(bool fast) { | |
| 502 ASSERT_TRUE(CleanupCacheDir()); | |
| 503 uint32 flags = disk_cache::kNoBuffering; | |
| 504 if (!fast) | |
| 505 flags |= disk_cache::kNoRandom; | |
| 506 | |
| 507 UseCurrentThread(); | |
| 508 CreateBackend(flags, NULL); | |
| 509 | |
| 510 net::TestCompletionCallback cb; | |
| 511 int rv = GeneratePendingIO(&cb); | |
| 512 | |
| 513 // The cache destructor will see one pending operation here. | |
| 514 cache_.reset(); | |
| 515 | |
| 516 if (rv == net::ERR_IO_PENDING) { | |
| 517 if (fast || simple_cache_mode_) | |
| 518 EXPECT_FALSE(cb.have_result()); | |
| 519 else | |
| 520 EXPECT_TRUE(cb.have_result()); | |
| 521 } | |
| 522 | |
| 523 base::MessageLoop::current()->RunUntilIdle(); | |
| 524 | |
| 525 #if !defined(OS_IOS) | |
| 526 // Wait for the actual operation to complete, or we'll keep a file handle that | |
| 527 // may cause issues later. Note that on iOS systems even though this test | |
| 528 // uses a single thread, the actual IO is posted to a worker thread and the | |
| 529 // cache destructor breaks the link to reach cb when the operation completes. | |
| 530 rv = cb.GetResult(rv); | |
| 531 #endif | |
| 532 } | |
| 533 | |
| 534 TEST_F(DiskCacheBackendTest, ShutdownWithPendingFileIO) { | |
| 535 BackendShutdownWithPendingFileIO(false); | |
| 536 } | |
| 537 | |
| 538 // Here and below, tests that simulate crashes are not compiled in LeakSanitizer | |
| 539 // builds because they contain a lot of intentional memory leaks. | |
| 540 // The wrapper scripts used to run tests under Valgrind Memcheck will also | |
| 541 // disable these tests. See: | |
| 542 // tools/valgrind/gtest_exclude/net_unittests.gtest-memcheck.txt | |
| 543 #if !defined(LEAK_SANITIZER) | |
| 544 // We'll be leaking from this test. | |
| 545 TEST_F(DiskCacheBackendTest, ShutdownWithPendingFileIO_Fast) { | |
| 546 // The integrity test sets kNoRandom so there's a version mismatch if we don't | |
| 547 // force new eviction. | |
| 548 SetNewEviction(); | |
| 549 BackendShutdownWithPendingFileIO(true); | |
| 550 } | |
| 551 #endif | |
| 552 | |
| 553 // See crbug.com/330074 | |
| 554 #if !defined(OS_IOS) | |
| 555 // Tests that one cache instance is not affected by another one going away. | |
| 556 TEST_F(DiskCacheBackendTest, MultipleInstancesWithPendingFileIO) { | |
| 557 base::ScopedTempDir store; | |
| 558 ASSERT_TRUE(store.CreateUniqueTempDir()); | |
| 559 | |
| 560 net::TestCompletionCallback cb; | |
| 561 scoped_ptr<disk_cache::Backend> extra_cache; | |
| 562 int rv = disk_cache::CreateCacheBackend( | |
| 563 net::DISK_CACHE, net::CACHE_BACKEND_DEFAULT, store.path(), 0, | |
| 564 false, base::MessageLoopProxy::current().get(), NULL, | |
| 565 &extra_cache, cb.callback()); | |
| 566 ASSERT_EQ(net::OK, cb.GetResult(rv)); | |
| 567 ASSERT_TRUE(extra_cache.get() != NULL); | |
| 568 | |
| 569 ASSERT_TRUE(CleanupCacheDir()); | |
| 570 SetNewEviction(); // Match the expected behavior for integrity verification. | |
| 571 UseCurrentThread(); | |
| 572 | |
| 573 CreateBackend(disk_cache::kNoBuffering, NULL); | |
| 574 rv = GeneratePendingIO(&cb); | |
| 575 | |
| 576 // cache_ has a pending operation, and extra_cache will go away. | |
| 577 extra_cache.reset(); | |
| 578 | |
| 579 if (rv == net::ERR_IO_PENDING) | |
| 580 EXPECT_FALSE(cb.have_result()); | |
| 581 | |
| 582 base::MessageLoop::current()->RunUntilIdle(); | |
| 583 | |
| 584 // Wait for the actual operation to complete, or we'll keep a file handle that | |
| 585 // may cause issues later. | |
| 586 rv = cb.GetResult(rv); | |
| 587 } | |
| 588 #endif | |
| 589 | |
| 590 // Tests that we deal with background-thread pending operations. | |
| 591 void DiskCacheBackendTest::BackendShutdownWithPendingIO(bool fast) { | |
| 592 net::TestCompletionCallback cb; | |
| 593 | |
| 594 { | |
| 595 ASSERT_TRUE(CleanupCacheDir()); | |
| 596 base::Thread cache_thread("CacheThread"); | |
| 597 ASSERT_TRUE(cache_thread.StartWithOptions( | |
| 598 base::Thread::Options(base::MessageLoop::TYPE_IO, 0))); | |
| 599 | |
| 600 uint32 flags = disk_cache::kNoBuffering; | |
| 601 if (!fast) | |
| 602 flags |= disk_cache::kNoRandom; | |
| 603 | |
| 604 CreateBackend(flags, &cache_thread); | |
| 605 | |
| 606 disk_cache::Entry* entry; | |
| 607 int rv = cache_->CreateEntry("some key", &entry, cb.callback()); | |
| 608 ASSERT_EQ(net::OK, cb.GetResult(rv)); | |
| 609 | |
| 610 entry->Close(); | |
| 611 | |
| 612 // The cache destructor will see one pending operation here. | |
| 613 cache_.reset(); | |
| 614 } | |
| 615 | |
| 616 base::MessageLoop::current()->RunUntilIdle(); | |
| 617 } | |
| 618 | |
| 619 TEST_F(DiskCacheBackendTest, ShutdownWithPendingIO) { | |
| 620 BackendShutdownWithPendingIO(false); | |
| 621 } | |
| 622 | |
| 623 #if !defined(LEAK_SANITIZER) | |
| 624 // We'll be leaking from this test. | |
| 625 TEST_F(DiskCacheBackendTest, ShutdownWithPendingIO_Fast) { | |
| 626 // The integrity test sets kNoRandom so there's a version mismatch if we don't | |
| 627 // force new eviction. | |
| 628 SetNewEviction(); | |
| 629 BackendShutdownWithPendingIO(true); | |
| 630 } | |
| 631 #endif | |
| 632 | |
| 633 // Tests that we deal with create-type pending operations. | |
| 634 void DiskCacheBackendTest::BackendShutdownWithPendingCreate(bool fast) { | |
| 635 net::TestCompletionCallback cb; | |
| 636 | |
| 637 { | |
| 638 ASSERT_TRUE(CleanupCacheDir()); | |
| 639 base::Thread cache_thread("CacheThread"); | |
| 640 ASSERT_TRUE(cache_thread.StartWithOptions( | |
| 641 base::Thread::Options(base::MessageLoop::TYPE_IO, 0))); | |
| 642 | |
| 643 disk_cache::BackendFlags flags = | |
| 644 fast ? disk_cache::kNone : disk_cache::kNoRandom; | |
| 645 CreateBackend(flags, &cache_thread); | |
| 646 | |
| 647 disk_cache::Entry* entry; | |
| 648 int rv = cache_->CreateEntry("some key", &entry, cb.callback()); | |
| 649 ASSERT_EQ(net::ERR_IO_PENDING, rv); | |
| 650 | |
| 651 cache_.reset(); | |
| 652 EXPECT_FALSE(cb.have_result()); | |
| 653 } | |
| 654 | |
| 655 base::MessageLoop::current()->RunUntilIdle(); | |
| 656 } | |
| 657 | |
| 658 TEST_F(DiskCacheBackendTest, ShutdownWithPendingCreate) { | |
| 659 BackendShutdownWithPendingCreate(false); | |
| 660 } | |
| 661 | |
| 662 #if !defined(LEAK_SANITIZER) | |
| 663 // We'll be leaking an entry from this test. | |
| 664 TEST_F(DiskCacheBackendTest, ShutdownWithPendingCreate_Fast) { | |
| 665 // The integrity test sets kNoRandom so there's a version mismatch if we don't | |
| 666 // force new eviction. | |
| 667 SetNewEviction(); | |
| 668 BackendShutdownWithPendingCreate(true); | |
| 669 } | |
| 670 #endif | |
| 671 | |
| 672 TEST_F(DiskCacheTest, TruncatedIndex) { | |
| 673 ASSERT_TRUE(CleanupCacheDir()); | |
| 674 base::FilePath index = cache_path_.AppendASCII("index"); | |
| 675 ASSERT_EQ(5, file_util::WriteFile(index, "hello", 5)); | |
| 676 | |
| 677 base::Thread cache_thread("CacheThread"); | |
| 678 ASSERT_TRUE(cache_thread.StartWithOptions( | |
| 679 base::Thread::Options(base::MessageLoop::TYPE_IO, 0))); | |
| 680 net::TestCompletionCallback cb; | |
| 681 | |
| 682 scoped_ptr<disk_cache::Backend> backend; | |
| 683 int rv = | |
| 684 disk_cache::CreateCacheBackend(net::DISK_CACHE, | |
| 685 net::CACHE_BACKEND_BLOCKFILE, | |
| 686 cache_path_, | |
| 687 0, | |
| 688 false, | |
| 689 cache_thread.message_loop_proxy().get(), | |
| 690 NULL, | |
| 691 &backend, | |
| 692 cb.callback()); | |
| 693 ASSERT_NE(net::OK, cb.GetResult(rv)); | |
| 694 | |
| 695 ASSERT_FALSE(backend); | |
| 696 } | |
| 697 | |
| 698 void DiskCacheBackendTest::BackendSetSize() { | |
| 699 const int cache_size = 0x10000; // 64 kB | |
| 700 SetMaxSize(cache_size); | |
| 701 InitCache(); | |
| 702 | |
| 703 std::string first("some key"); | |
| 704 std::string second("something else"); | |
| 705 disk_cache::Entry* entry; | |
| 706 ASSERT_EQ(net::OK, CreateEntry(first, &entry)); | |
| 707 | |
| 708 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(cache_size)); | |
| 709 memset(buffer->data(), 0, cache_size); | |
| 710 EXPECT_EQ(cache_size / 10, | |
| 711 WriteData(entry, 0, 0, buffer.get(), cache_size / 10, false)) | |
| 712 << "normal file"; | |
| 713 | |
| 714 EXPECT_EQ(net::ERR_FAILED, | |
| 715 WriteData(entry, 1, 0, buffer.get(), cache_size / 5, false)) | |
| 716 << "file size above the limit"; | |
| 717 | |
| 718 // By doubling the total size, we make this file cacheable. | |
| 719 SetMaxSize(cache_size * 2); | |
| 720 EXPECT_EQ(cache_size / 5, | |
| 721 WriteData(entry, 1, 0, buffer.get(), cache_size / 5, false)); | |
| 722 | |
| 723 // Let's fill up the cache!. | |
| 724 SetMaxSize(cache_size * 10); | |
| 725 EXPECT_EQ(cache_size * 3 / 4, | |
| 726 WriteData(entry, 0, 0, buffer.get(), cache_size * 3 / 4, false)); | |
| 727 entry->Close(); | |
| 728 FlushQueueForTest(); | |
| 729 | |
| 730 SetMaxSize(cache_size); | |
| 731 | |
| 732 // The cache is 95% full. | |
| 733 | |
| 734 ASSERT_EQ(net::OK, CreateEntry(second, &entry)); | |
| 735 EXPECT_EQ(cache_size / 10, | |
| 736 WriteData(entry, 0, 0, buffer.get(), cache_size / 10, false)); | |
| 737 | |
| 738 disk_cache::Entry* entry2; | |
| 739 ASSERT_EQ(net::OK, CreateEntry("an extra key", &entry2)); | |
| 740 EXPECT_EQ(cache_size / 10, | |
| 741 WriteData(entry2, 0, 0, buffer.get(), cache_size / 10, false)); | |
| 742 entry2->Close(); // This will trigger the cache trim. | |
| 743 | |
| 744 EXPECT_NE(net::OK, OpenEntry(first, &entry2)); | |
| 745 | |
| 746 FlushQueueForTest(); // Make sure that we are done trimming the cache. | |
| 747 FlushQueueForTest(); // We may have posted two tasks to evict stuff. | |
| 748 | |
| 749 entry->Close(); | |
| 750 ASSERT_EQ(net::OK, OpenEntry(second, &entry)); | |
| 751 EXPECT_EQ(cache_size / 10, entry->GetDataSize(0)); | |
| 752 entry->Close(); | |
| 753 } | |
| 754 | |
| 755 TEST_F(DiskCacheBackendTest, SetSize) { | |
| 756 BackendSetSize(); | |
| 757 } | |
| 758 | |
| 759 TEST_F(DiskCacheBackendTest, NewEvictionSetSize) { | |
| 760 SetNewEviction(); | |
| 761 BackendSetSize(); | |
| 762 } | |
| 763 | |
| 764 TEST_F(DiskCacheBackendTest, MemoryOnlySetSize) { | |
| 765 SetMemoryOnlyMode(); | |
| 766 BackendSetSize(); | |
| 767 } | |
| 768 | |
| 769 void DiskCacheBackendTest::BackendLoad() { | |
| 770 InitCache(); | |
| 771 int seed = static_cast<int>(Time::Now().ToInternalValue()); | |
| 772 srand(seed); | |
| 773 | |
| 774 disk_cache::Entry* entries[100]; | |
| 775 for (int i = 0; i < 100; i++) { | |
| 776 std::string key = GenerateKey(true); | |
| 777 ASSERT_EQ(net::OK, CreateEntry(key, &entries[i])); | |
| 778 } | |
| 779 EXPECT_EQ(100, cache_->GetEntryCount()); | |
| 780 | |
| 781 for (int i = 0; i < 100; i++) { | |
| 782 int source1 = rand() % 100; | |
| 783 int source2 = rand() % 100; | |
| 784 disk_cache::Entry* temp = entries[source1]; | |
| 785 entries[source1] = entries[source2]; | |
| 786 entries[source2] = temp; | |
| 787 } | |
| 788 | |
| 789 for (int i = 0; i < 100; i++) { | |
| 790 disk_cache::Entry* entry; | |
| 791 ASSERT_EQ(net::OK, OpenEntry(entries[i]->GetKey(), &entry)); | |
| 792 EXPECT_TRUE(entry == entries[i]); | |
| 793 entry->Close(); | |
| 794 entries[i]->Doom(); | |
| 795 entries[i]->Close(); | |
| 796 } | |
| 797 FlushQueueForTest(); | |
| 798 EXPECT_EQ(0, cache_->GetEntryCount()); | |
| 799 } | |
| 800 | |
// Runs BackendLoad() under each backend configuration. The tiny mask
// (0xf -> 16-entry index table) forces heavy hash-bucket chaining.
TEST_F(DiskCacheBackendTest, Load) {
  // Work with a tiny index table (16 entries)
  SetMask(0xf);
  SetMaxSize(0x100000);
  BackendLoad();
}

TEST_F(DiskCacheBackendTest, NewEvictionLoad) {
  SetNewEviction();
  // Work with a tiny index table (16 entries)
  SetMask(0xf);
  SetMaxSize(0x100000);
  BackendLoad();
}

// The in-memory backend has no index mask; only the size is capped.
TEST_F(DiskCacheBackendTest, MemoryOnlyLoad) {
  SetMaxSize(0x100000);
  SetMemoryOnlyMode();
  BackendLoad();
}

TEST_F(DiskCacheBackendTest, AppCacheLoad) {
  SetCacheType(net::APP_CACHE);
  // Work with a tiny index table (16 entries)
  SetMask(0xf);
  SetMaxSize(0x100000);
  BackendLoad();
}

TEST_F(DiskCacheBackendTest, ShaderCacheLoad) {
  SetCacheType(net::SHADER_CACHE);
  // Work with a tiny index table (16 entries)
  SetMask(0xf);
  SetMaxSize(0x100000);
  BackendLoad();
}
| 837 | |
// Tests the chaining of an entry to the current head.
void DiskCacheBackendTest::BackendChain() {
  SetMask(0x1);  // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  InitCache();

  disk_cache::Entry* entry;
  // With only two hash buckets, the second entry likely lands on an occupied
  // bucket and must be chained.
  ASSERT_EQ(net::OK, CreateEntry("The first key", &entry));
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry("The Second key", &entry));
  entry->Close();
}
| 850 | |
// Runs BackendChain() under each applicable backend configuration.
TEST_F(DiskCacheBackendTest, Chain) {
  BackendChain();
}

TEST_F(DiskCacheBackendTest, NewEvictionChain) {
  SetNewEviction();
  BackendChain();
}

TEST_F(DiskCacheBackendTest, AppCacheChain) {
  SetCacheType(net::APP_CACHE);
  BackendChain();
}

TEST_F(DiskCacheBackendTest, ShaderCacheChain) {
  SetCacheType(net::SHADER_CACHE);
  BackendChain();
}
| 869 | |
// Verifies that with the new eviction algorithm trimming evicts first from
// list 1 (reopened entries) and then from list 0 (never-reopened entries).
TEST_F(DiskCacheBackendTest, NewEvictionTrim) {
  SetNewEviction();
  InitCache();

  disk_cache::Entry* entry;
  for (int i = 0; i < 100; i++) {
    std::string name(base::StringPrintf("Key %d", i));
    ASSERT_EQ(net::OK, CreateEntry(name, &entry));
    entry->Close();
    if (i < 90) {
      // Entries 0 to 89 are in list 1; 90 to 99 are in list 0.
      ASSERT_EQ(net::OK, OpenEntry(name, &entry));
      entry->Close();
    }
  }

  // The first eviction must come from list 1 (10% limit), the second must come
  // from list 0.
  TrimForTest(false);
  EXPECT_NE(net::OK, OpenEntry("Key 0", &entry));
  TrimForTest(false);
  EXPECT_NE(net::OK, OpenEntry("Key 90", &entry));

  // Double check that we still have the list tails.
  ASSERT_EQ(net::OK, OpenEntry("Key 1", &entry));
  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry("Key 91", &entry));
  entry->Close();
}
| 899 | |
// Before looking for invalid entries, let's check a valid entry.
// Writes data, closes the entry, crashes, and verifies the data survives.
void DiskCacheBackendTest::BackendValidEntry() {
  InitCache();

  std::string key("Some key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  memset(buffer1->data(), 0, kSize);
  base::strlcpy(buffer1->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer1.get(), kSize, false));
  // The entry is closed before the crash, so it was stored cleanly.
  entry->Close();
  SimulateCrash();

  // The entry must still open after the crash and return the same data.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));

  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
  memset(buffer2->data(), 0, kSize);
  EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer2.get(), kSize));
  entry->Close();
  EXPECT_STREQ(buffer1->data(), buffer2->data());
}
| 924 | |
// Runs BackendValidEntry() with the default and new eviction algorithms.
TEST_F(DiskCacheBackendTest, ValidEntry) {
  BackendValidEntry();
}

TEST_F(DiskCacheBackendTest, NewEvictionValidEntry) {
  SetNewEviction();
  BackendValidEntry();
}
| 933 | |
// The same logic of the previous test (ValidEntry), but this time force the
// entry to be invalid, simulating a crash in the middle.
// We'll be leaking memory from this test.
void DiskCacheBackendTest::BackendInvalidEntry() {
  InitCache();

  std::string key("Some key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  memset(buffer->data(), 0, kSize);
  base::strlcpy(buffer->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
  // Crash without closing the entry, leaving it dirty on disk.
  SimulateCrash();

  // The dirty entry must not be visible after restart.
  EXPECT_NE(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(0, cache_->GetEntryCount());
}
| 954 | |
#if !defined(LEAK_SANITIZER)
// Runs BackendInvalidEntry() under each backend configuration.
// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, InvalidEntry) {
  BackendInvalidEntry();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry) {
  SetNewEviction();
  BackendInvalidEntry();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, AppCacheInvalidEntry) {
  SetCacheType(net::APP_CACHE);
  BackendInvalidEntry();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntry) {
  SetCacheType(net::SHADER_CACHE);
  BackendInvalidEntry();
}
| 978 | |
// Almost the same test, but this time crash the cache after reading an entry.
// We'll be leaking memory from this test.
void DiskCacheBackendTest::BackendInvalidEntryRead() {
  InitCache();

  std::string key("Some key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  memset(buffer->data(), 0, kSize);
  base::strlcpy(buffer->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
  entry->Close();
  // Reopen and read, then crash while the entry is still open.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer.get(), kSize));

  SimulateCrash();

  if (type_ == net::APP_CACHE) {
    // Reading an entry and crashing should not make it dirty.
    ASSERT_EQ(net::OK, OpenEntry(key, &entry));
    EXPECT_EQ(1, cache_->GetEntryCount());
    entry->Close();
  } else {
    EXPECT_NE(net::OK, OpenEntry(key, &entry));
    EXPECT_EQ(0, cache_->GetEntryCount());
  }
}
| 1009 | |
// Runs BackendInvalidEntryRead() under each backend configuration.
// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, InvalidEntryRead) {
  BackendInvalidEntryRead();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryRead) {
  SetNewEviction();
  BackendInvalidEntryRead();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, AppCacheInvalidEntryRead) {
  SetCacheType(net::APP_CACHE);
  BackendInvalidEntryRead();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntryRead) {
  SetCacheType(net::SHADER_CACHE);
  BackendInvalidEntryRead();
}
| 1032 | |
// Creates many entries, keeps half of them open across a crash, and checks
// that only the cleanly-closed half survives.
// We'll be leaking memory from this test.
void DiskCacheBackendTest::BackendInvalidEntryWithLoad() {
  // Work with a tiny index table (16 entries)
  SetMask(0xf);
  SetMaxSize(0x100000);
  InitCache();

  // Seed rand() with the current time so each run shuffles differently.
  int seed = static_cast<int>(Time::Now().ToInternalValue());
  srand(seed);

  const int kNumEntries = 100;
  disk_cache::Entry* entries[kNumEntries];
  for (int i = 0; i < kNumEntries; i++) {
    std::string key = GenerateKey(true);
    ASSERT_EQ(net::OK, CreateEntry(key, &entries[i]));
  }
  EXPECT_EQ(kNumEntries, cache_->GetEntryCount());

  // Shuffle the array of open entry pointers.
  for (int i = 0; i < kNumEntries; i++) {
    int source1 = rand() % kNumEntries;
    int source2 = rand() % kNumEntries;
    disk_cache::Entry* temp = entries[source1];
    entries[source1] = entries[source2];
    entries[source2] = temp;
  }

  // Close the first half; the second half stays open (dirty) at crash time.
  std::string keys[kNumEntries];
  for (int i = 0; i < kNumEntries; i++) {
    keys[i] = entries[i]->GetKey();
    if (i < kNumEntries / 2)
      entries[i]->Close();
  }

  SimulateCrash();

  // Entries left open across the crash must be gone.
  for (int i = kNumEntries / 2; i < kNumEntries; i++) {
    disk_cache::Entry* entry;
    EXPECT_NE(net::OK, OpenEntry(keys[i], &entry));
  }

  // Cleanly closed entries must still open.
  for (int i = 0; i < kNumEntries / 2; i++) {
    disk_cache::Entry* entry;
    ASSERT_EQ(net::OK, OpenEntry(keys[i], &entry));
    entry->Close();
  }

  EXPECT_EQ(kNumEntries / 2, cache_->GetEntryCount());
}
| 1081 | |
// Runs BackendInvalidEntryWithLoad() under each backend configuration.
// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, InvalidEntryWithLoad) {
  BackendInvalidEntryWithLoad();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryWithLoad) {
  SetNewEviction();
  BackendInvalidEntryWithLoad();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, AppCacheInvalidEntryWithLoad) {
  SetCacheType(net::APP_CACHE);
  BackendInvalidEntryWithLoad();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntryWithLoad) {
  SetCacheType(net::SHADER_CACHE);
  BackendInvalidEntryWithLoad();
}
| 1104 | |
// Checks that a dirty (crashed) entry is evicted when the cache is trimmed.
// We'll be leaking memory from this test.
void DiskCacheBackendTest::BackendTrimInvalidEntry() {
  const int kSize = 0x3000;  // 12 kB
  SetMaxSize(kSize * 10);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(first, &entry));

  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  memset(buffer->data(), 0, kSize);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));

  // Simulate a crash.
  SimulateCrash();

  ASSERT_EQ(net::OK, CreateEntry(second, &entry));
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));

  EXPECT_EQ(2, cache_->GetEntryCount());
  // Shrink the limit so closing the entry forces an eviction.
  SetMaxSize(kSize);
  entry->Close();  // Trim the cache.
  FlushQueueForTest();

  // If we evicted the entry in less than 20 ms, we have one entry in the
  // cache; if it took more than that, we posted a task and we'll delete the
  // second entry too.
  base::MessageLoop::current()->RunUntilIdle();

  // This may be not thread-safe in general, but for now it's OK so add some
  // ThreadSanitizer annotations to ignore data races on cache_.
  // See http://crbug.com/55970
  ANNOTATE_IGNORE_READS_BEGIN();
  EXPECT_GE(1, cache_->GetEntryCount());
  ANNOTATE_IGNORE_READS_END();

  EXPECT_NE(net::OK, OpenEntry(first, &entry));
}
| 1145 | |
// Runs BackendTrimInvalidEntry() with the default and new eviction code.
// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, TrimInvalidEntry) {
  BackendTrimInvalidEntry();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry) {
  SetNewEviction();
  BackendTrimInvalidEntry();
}
| 1156 | |
// Like BackendTrimInvalidEntry(), but with many chained entries left open
// (dirty) across the crash before trimming.
// We'll be leaking memory from this test.
void DiskCacheBackendTest::BackendTrimInvalidEntry2() {
  SetMask(0xf);  // 16-entry table.

  const int kSize = 0x3000;  // 12 kB
  SetMaxSize(kSize * 40);
  InitCache();

  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  memset(buffer->data(), 0, kSize);
  disk_cache::Entry* entry;

  // Writing 32 entries to this cache chains most of them.
  for (int i = 0; i < 32; i++) {
    std::string key(base::StringPrintf("some key %d", i));
    ASSERT_EQ(net::OK, CreateEntry(key, &entry));
    EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
    entry->Close();
    ASSERT_EQ(net::OK, OpenEntry(key, &entry));
    // Note that we are not closing the entries.
  }

  // Simulate a crash.
  SimulateCrash();

  ASSERT_EQ(net::OK, CreateEntry("Something else", &entry));
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));

  FlushQueueForTest();
  EXPECT_EQ(33, cache_->GetEntryCount());
  SetMaxSize(kSize);

  // For the new eviction code, all corrupt entries are on the second list so
  // they are not going away that easy.
  if (new_eviction_) {
    EXPECT_EQ(net::OK, DoomAllEntries());
  }

  entry->Close();  // Trim the cache.
  FlushQueueForTest();

  // We may abort the eviction before cleaning up everything.
  base::MessageLoop::current()->RunUntilIdle();
  FlushQueueForTest();
  // If it's not clear enough: we may still have eviction tasks running at this
  // time, so the number of entries is changing while we read it.
  ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
  EXPECT_GE(30, cache_->GetEntryCount());
  ANNOTATE_IGNORE_READS_AND_WRITES_END();
}
| 1207 | |
// Runs BackendTrimInvalidEntry2() with the default and new eviction code.
// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, TrimInvalidEntry2) {
  BackendTrimInvalidEntry2();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry2) {
  SetNewEviction();
  BackendTrimInvalidEntry2();
}
#endif  // !defined(LEAK_SANITIZER)
| 1219 | |
| 1220 void DiskCacheBackendTest::BackendEnumerations() { | |
| 1221 InitCache(); | |
| 1222 Time initial = Time::Now(); | |
| 1223 | |
| 1224 const int kNumEntries = 100; | |
| 1225 for (int i = 0; i < kNumEntries; i++) { | |
| 1226 std::string key = GenerateKey(true); | |
| 1227 disk_cache::Entry* entry; | |
| 1228 ASSERT_EQ(net::OK, CreateEntry(key, &entry)); | |
| 1229 entry->Close(); | |
| 1230 } | |
| 1231 EXPECT_EQ(kNumEntries, cache_->GetEntryCount()); | |
| 1232 Time final = Time::Now(); | |
| 1233 | |
| 1234 disk_cache::Entry* entry; | |
| 1235 void* iter = NULL; | |
| 1236 int count = 0; | |
| 1237 Time last_modified[kNumEntries]; | |
| 1238 Time last_used[kNumEntries]; | |
| 1239 while (OpenNextEntry(&iter, &entry) == net::OK) { | |
| 1240 ASSERT_TRUE(NULL != entry); | |
| 1241 if (count < kNumEntries) { | |
| 1242 last_modified[count] = entry->GetLastModified(); | |
| 1243 last_used[count] = entry->GetLastUsed(); | |
| 1244 EXPECT_TRUE(initial <= last_modified[count]); | |
| 1245 EXPECT_TRUE(final >= last_modified[count]); | |
| 1246 } | |
| 1247 | |
| 1248 entry->Close(); | |
| 1249 count++; | |
| 1250 }; | |
| 1251 EXPECT_EQ(kNumEntries, count); | |
| 1252 | |
| 1253 iter = NULL; | |
| 1254 count = 0; | |
| 1255 // The previous enumeration should not have changed the timestamps. | |
| 1256 while (OpenNextEntry(&iter, &entry) == net::OK) { | |
| 1257 ASSERT_TRUE(NULL != entry); | |
| 1258 if (count < kNumEntries) { | |
| 1259 EXPECT_TRUE(last_modified[count] == entry->GetLastModified()); | |
| 1260 EXPECT_TRUE(last_used[count] == entry->GetLastUsed()); | |
| 1261 } | |
| 1262 entry->Close(); | |
| 1263 count++; | |
| 1264 }; | |
| 1265 EXPECT_EQ(kNumEntries, count); | |
| 1266 } | |
| 1267 | |
// Runs BackendEnumerations() under each backend configuration.
TEST_F(DiskCacheBackendTest, Enumerations) {
  BackendEnumerations();
}

TEST_F(DiskCacheBackendTest, NewEvictionEnumerations) {
  SetNewEviction();
  BackendEnumerations();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyEnumerations) {
  SetMemoryOnlyMode();
  BackendEnumerations();
}

TEST_F(DiskCacheBackendTest, ShaderCacheEnumerations) {
  SetCacheType(net::SHADER_CACHE);
  BackendEnumerations();
}

TEST_F(DiskCacheBackendTest, AppCacheEnumerations) {
  SetCacheType(net::APP_CACHE);
  BackendEnumerations();
}
| 1291 | |
// Verifies enumerations while entries are open.
void DiskCacheBackendTest::BackendEnumerations2() {
  InitCache();
  const std::string first("first");
  const std::string second("second");
  disk_cache::Entry *entry1, *entry2;
  ASSERT_EQ(net::OK, CreateEntry(first, &entry1));
  entry1->Close();
  ASSERT_EQ(net::OK, CreateEntry(second, &entry2));
  entry2->Close();
  FlushQueueForTest();

  // Make sure that the timestamp is not the same.
  AddDelay();
  // Open "second" and start an enumeration; "second" should come back first.
  ASSERT_EQ(net::OK, OpenEntry(second, &entry1));
  void* iter = NULL;
  ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry2));
  EXPECT_EQ(entry2->GetKey(), second);

  // Two entries and the iterator pointing at "first".
  entry1->Close();
  entry2->Close();

  // The iterator should still be valid, so we should not crash.
  ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry2));
  EXPECT_EQ(entry2->GetKey(), first);
  entry2->Close();
  cache_->EndEnumeration(&iter);

  // Modify the oldest entry and get the newest element.
  ASSERT_EQ(net::OK, OpenEntry(first, &entry1));
  EXPECT_EQ(0, WriteData(entry1, 0, 200, NULL, 0, false));
  ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry2));
  if (type_ == net::APP_CACHE) {
    // The list is not updated.
    EXPECT_EQ(entry2->GetKey(), second);
  } else {
    EXPECT_EQ(entry2->GetKey(), first);
  }

  entry1->Close();
  entry2->Close();
  cache_->EndEnumeration(&iter);
}
| 1336 | |
// Runs BackendEnumerations2() under each backend configuration.
TEST_F(DiskCacheBackendTest, Enumerations2) {
  BackendEnumerations2();
}

TEST_F(DiskCacheBackendTest, NewEvictionEnumerations2) {
  SetNewEviction();
  BackendEnumerations2();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyEnumerations2) {
  SetMemoryOnlyMode();
  BackendEnumerations2();
}

TEST_F(DiskCacheBackendTest, AppCacheEnumerations2) {
  SetCacheType(net::APP_CACHE);
  BackendEnumerations2();
}

TEST_F(DiskCacheBackendTest, ShaderCacheEnumerations2) {
  SetCacheType(net::SHADER_CACHE);
  BackendEnumerations2();
}
| 1360 | |
// Verify that ReadData calls do not update the LRU cache
// when using the SHADER_CACHE type.
TEST_F(DiskCacheBackendTest, ShaderCacheEnumerationReadData) {
  SetCacheType(net::SHADER_CACHE);
  InitCache();
  const std::string first("first");
  const std::string second("second");
  disk_cache::Entry *entry1, *entry2;
  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));

  ASSERT_EQ(net::OK, CreateEntry(first, &entry1));
  memset(buffer1->data(), 0, kSize);
  base::strlcpy(buffer1->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry1, 0, 0, buffer1.get(), kSize, false));

  ASSERT_EQ(net::OK, CreateEntry(second, &entry2));
  entry2->Close();

  FlushQueueForTest();

  // Make sure that the timestamp is not the same.
  AddDelay();

  // Read from the last item in the LRU.
  EXPECT_EQ(kSize, ReadData(entry1, 0, 0, buffer1.get(), kSize));
  entry1->Close();

  // If the read had updated the LRU, "first" would now be the newest entry;
  // the enumeration must still return "second" first.
  void* iter = NULL;
  ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry2));
  EXPECT_EQ(entry2->GetKey(), second);
  entry2->Close();
  cache_->EndEnumeration(&iter);
}
| 1395 | |
| 1396 #if !defined(LEAK_SANITIZER) | |
| 1397 // Verify handling of invalid entries while doing enumerations. | |
| 1398 // We'll be leaking memory from this test. | |
| 1399 void DiskCacheBackendTest::BackendInvalidEntryEnumeration() { | |
| 1400 InitCache(); | |
| 1401 | |
| 1402 std::string key("Some key"); | |
| 1403 disk_cache::Entry *entry, *entry1, *entry2; | |
| 1404 ASSERT_EQ(net::OK, CreateEntry(key, &entry1)); | |
| 1405 | |
| 1406 const int kSize = 50; | |
| 1407 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize)); | |
| 1408 memset(buffer1->data(), 0, kSize); | |
| 1409 base::strlcpy(buffer1->data(), "And the data to save", kSize); | |
| 1410 EXPECT_EQ(kSize, WriteData(entry1, 0, 0, buffer1.get(), kSize, false)); | |
| 1411 entry1->Close(); | |
| 1412 ASSERT_EQ(net::OK, OpenEntry(key, &entry1)); | |
| 1413 EXPECT_EQ(kSize, ReadData(entry1, 0, 0, buffer1.get(), kSize)); | |
| 1414 | |
| 1415 std::string key2("Another key"); | |
| 1416 ASSERT_EQ(net::OK, CreateEntry(key2, &entry2)); | |
| 1417 entry2->Close(); | |
| 1418 ASSERT_EQ(2, cache_->GetEntryCount()); | |
| 1419 | |
| 1420 SimulateCrash(); | |
| 1421 | |
| 1422 void* iter = NULL; | |
| 1423 int count = 0; | |
| 1424 while (OpenNextEntry(&iter, &entry) == net::OK) { | |
| 1425 ASSERT_TRUE(NULL != entry); | |
| 1426 EXPECT_EQ(key2, entry->GetKey()); | |
| 1427 entry->Close(); | |
| 1428 count++; | |
| 1429 }; | |
| 1430 EXPECT_EQ(1, count); | |
| 1431 EXPECT_EQ(1, cache_->GetEntryCount()); | |
| 1432 } | |
| 1433 | |
// Runs BackendInvalidEntryEnumeration() with both eviction algorithms.
// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, InvalidEntryEnumeration) {
  BackendInvalidEntryEnumeration();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryEnumeration) {
  SetNewEviction();
  BackendInvalidEntryEnumeration();
}
#endif  // !defined(LEAK_SANITIZER)
| 1445 | |
// Tests that if for some reason entries are modified close to existing cache
// iterators, we don't generate fatal errors or reset the cache.
void DiskCacheBackendTest::BackendFixEnumerators() {
  InitCache();

  // Seed rand() with the current time; GenerateKey(true) uses rand().
  int seed = static_cast<int>(Time::Now().ToInternalValue());
  srand(seed);

  const int kNumEntries = 10;
  for (int i = 0; i < kNumEntries; i++) {
    std::string key = GenerateKey(true);
    disk_cache::Entry* entry;
    ASSERT_EQ(net::OK, CreateEntry(key, &entry));
    entry->Close();
  }
  EXPECT_EQ(kNumEntries, cache_->GetEntryCount());

  disk_cache::Entry *entry1, *entry2;
  void* iter1 = NULL;
  void* iter2 = NULL;
  ASSERT_EQ(net::OK, OpenNextEntry(&iter1, &entry1));
  ASSERT_TRUE(NULL != entry1);
  entry1->Close();
  entry1 = NULL;

  // Let's go to the middle of the list.
  for (int i = 0; i < kNumEntries / 2; i++) {
    if (entry1)
      entry1->Close();
    ASSERT_EQ(net::OK, OpenNextEntry(&iter1, &entry1));
    ASSERT_TRUE(NULL != entry1);

    ASSERT_EQ(net::OK, OpenNextEntry(&iter2, &entry2));
    ASSERT_TRUE(NULL != entry2);
    entry2->Close();
  }

  // Messing up with entry1 will modify entry2->next.
  entry1->Doom();
  ASSERT_EQ(net::OK, OpenNextEntry(&iter2, &entry2));
  ASSERT_TRUE(NULL != entry2);

  // The link entry2->entry1 should be broken.
  EXPECT_NE(entry2->GetKey(), entry1->GetKey());
  entry1->Close();
  entry2->Close();

  // And the second iterator should keep working.
  ASSERT_EQ(net::OK, OpenNextEntry(&iter2, &entry2));
  ASSERT_TRUE(NULL != entry2);
  entry2->Close();

  cache_->EndEnumeration(&iter1);
  cache_->EndEnumeration(&iter2);
}
| 1501 | |
// Runs BackendFixEnumerators() with both eviction algorithms.
TEST_F(DiskCacheBackendTest, FixEnumerators) {
  BackendFixEnumerators();
}

TEST_F(DiskCacheBackendTest, NewEvictionFixEnumerators) {
  SetNewEviction();
  BackendFixEnumerators();
}
| 1510 | |
| 1511 void DiskCacheBackendTest::BackendDoomRecent() { | |
| 1512 InitCache(); | |
| 1513 | |
| 1514 disk_cache::Entry *entry; | |
| 1515 ASSERT_EQ(net::OK, CreateEntry("first", &entry)); | |
| 1516 entry->Close(); | |
| 1517 ASSERT_EQ(net::OK, CreateEntry("second", &entry)); | |
| 1518 entry->Close(); | |
| 1519 FlushQueueForTest(); | |
| 1520 | |
| 1521 AddDelay(); | |
| 1522 Time middle = Time::Now(); | |
| 1523 | |
| 1524 ASSERT_EQ(net::OK, CreateEntry("third", &entry)); | |
| 1525 entry->Close(); | |
| 1526 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry)); | |
| 1527 entry->Close(); | |
| 1528 FlushQueueForTest(); | |
| 1529 | |
| 1530 AddDelay(); | |
| 1531 Time final = Time::Now(); | |
| 1532 | |
| 1533 ASSERT_EQ(4, cache_->GetEntryCount()); | |
| 1534 EXPECT_EQ(net::OK, DoomEntriesSince(final)); | |
| 1535 ASSERT_EQ(4, cache_->GetEntryCount()); | |
| 1536 | |
| 1537 EXPECT_EQ(net::OK, DoomEntriesSince(middle)); | |
| 1538 ASSERT_EQ(2, cache_->GetEntryCount()); | |
| 1539 | |
| 1540 ASSERT_EQ(net::OK, OpenEntry("second", &entry)); | |
| 1541 entry->Close(); | |
| 1542 } | |
| 1543 | |
// Runs BackendDoomRecent() under each backend configuration.
TEST_F(DiskCacheBackendTest, DoomRecent) {
  BackendDoomRecent();
}

TEST_F(DiskCacheBackendTest, NewEvictionDoomRecent) {
  SetNewEviction();
  BackendDoomRecent();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyDoomRecent) {
  SetMemoryOnlyMode();
  BackendDoomRecent();
}
| 1557 | |
// DoomEntriesSince() on a cache holding sparse entries.
TEST_F(DiskCacheBackendTest, MemoryOnlyDoomEntriesSinceSparse) {
  SetMemoryOnlyMode();
  base::Time start;
  InitSparseCache(&start, NULL);
  DoomEntriesSince(start);
  EXPECT_EQ(1, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, DoomEntriesSinceSparse) {
  base::Time start;
  InitSparseCache(&start, NULL);
  DoomEntriesSince(start);
  // NOTE: BackendImpl counts child entries in its GetEntryCount(), while
  // MemBackendImpl does not. That's why the expected value differs here from
  // MemoryOnlyDoomEntriesSinceSparse.
  EXPECT_EQ(3, cache_->GetEntryCount());
}
| 1575 | |
// DoomAllEntries() must empty a cache holding sparse entries.
TEST_F(DiskCacheBackendTest, MemoryOnlyDoomAllSparse) {
  SetMemoryOnlyMode();
  InitSparseCache(NULL, NULL);
  EXPECT_EQ(net::OK, DoomAllEntries());
  EXPECT_EQ(0, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, DoomAllSparse) {
  InitSparseCache(NULL, NULL);
  EXPECT_EQ(net::OK, DoomAllEntries());
  EXPECT_EQ(0, cache_->GetEntryCount());
}
| 1588 | |
| 1589 void DiskCacheBackendTest::BackendDoomBetween() { | |
| 1590 InitCache(); | |
| 1591 | |
| 1592 disk_cache::Entry *entry; | |
| 1593 ASSERT_EQ(net::OK, CreateEntry("first", &entry)); | |
| 1594 entry->Close(); | |
| 1595 FlushQueueForTest(); | |
| 1596 | |
| 1597 AddDelay(); | |
| 1598 Time middle_start = Time::Now(); | |
| 1599 | |
| 1600 ASSERT_EQ(net::OK, CreateEntry("second", &entry)); | |
| 1601 entry->Close(); | |
| 1602 ASSERT_EQ(net::OK, CreateEntry("third", &entry)); | |
| 1603 entry->Close(); | |
| 1604 FlushQueueForTest(); | |
| 1605 | |
| 1606 AddDelay(); | |
| 1607 Time middle_end = Time::Now(); | |
| 1608 AddDelay(); | |
| 1609 | |
| 1610 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry)); | |
| 1611 entry->Close(); | |
| 1612 ASSERT_EQ(net::OK, OpenEntry("fourth", &entry)); | |
| 1613 entry->Close(); | |
| 1614 FlushQueueForTest(); | |
| 1615 | |
| 1616 AddDelay(); | |
| 1617 Time final = Time::Now(); | |
| 1618 | |
| 1619 ASSERT_EQ(4, cache_->GetEntryCount()); | |
| 1620 EXPECT_EQ(net::OK, DoomEntriesBetween(middle_start, middle_end)); | |
| 1621 ASSERT_EQ(2, cache_->GetEntryCount()); | |
| 1622 | |
| 1623 ASSERT_EQ(net::OK, OpenEntry("fourth", &entry)); | |
| 1624 entry->Close(); | |
| 1625 | |
| 1626 EXPECT_EQ(net::OK, DoomEntriesBetween(middle_start, final)); | |
| 1627 ASSERT_EQ(1, cache_->GetEntryCount()); | |
| 1628 | |
| 1629 ASSERT_EQ(net::OK, OpenEntry("first", &entry)); | |
| 1630 entry->Close(); | |
| 1631 } | |
| 1632 | |
// Runs BackendDoomBetween() under each backend configuration.
TEST_F(DiskCacheBackendTest, DoomBetween) {
  BackendDoomBetween();
}

TEST_F(DiskCacheBackendTest, NewEvictionDoomBetween) {
  SetNewEviction();
  BackendDoomBetween();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyDoomBetween) {
  SetMemoryOnlyMode();
  BackendDoomBetween();
}
| 1646 | |
// DoomEntriesBetween() on a cache holding sparse entries. The expected
// counts differ between backends because BackendImpl counts sparse child
// entries in GetEntryCount() while MemBackendImpl does not.
TEST_F(DiskCacheBackendTest, MemoryOnlyDoomEntriesBetweenSparse) {
  SetMemoryOnlyMode();
  base::Time start, end;
  InitSparseCache(&start, &end);
  DoomEntriesBetween(start, end);
  EXPECT_EQ(3, cache_->GetEntryCount());

  start = end;
  end = base::Time::Now();
  DoomEntriesBetween(start, end);
  EXPECT_EQ(1, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, DoomEntriesBetweenSparse) {
  base::Time start, end;
  InitSparseCache(&start, &end);
  DoomEntriesBetween(start, end);
  EXPECT_EQ(9, cache_->GetEntryCount());

  start = end;
  end = base::Time::Now();
  DoomEntriesBetween(start, end);
  EXPECT_EQ(3, cache_->GetEntryCount());
}
| 1671 | |
// Restores the pre-recorded cache snapshot |name| (captured in the middle of
// an operation) and verifies that the interrupted work is rolled back when
// the cache is reopened. |num_entries| is the number of valid entries
// expected after recovery; |load| selects the tiny-index configuration used
// when the snapshot was recorded. Sets |success_| on completion so callers
// can tell which snapshot failed.
void DiskCacheBackendTest::BackendTransaction(const std::string& name,
                                              int num_entries, bool load) {
  success_ = false;
  ASSERT_TRUE(CopyTestCache(name));
  DisableFirstCleanup();

  uint32 mask;
  if (load) {
    mask = 0xf;
    SetMaxSize(0x100000);
  } else {
    // Clear the settings from the previous run.
    mask = 0;
    SetMaxSize(0);
  }
  SetMask(mask);

  InitCache();
  // The snapshot contains the valid entries plus one in-flight entry.
  ASSERT_EQ(num_entries + 1, cache_->GetEntryCount());

  std::string key("the first key");
  disk_cache::Entry* entry1;
  // The entry that was being inserted must be gone after recovery.
  ASSERT_NE(net::OK, OpenEntry(key, &entry1));

  int actual = cache_->GetEntryCount();
  if (num_entries != actual) {
    ASSERT_TRUE(load);
    // If there is a heavy load, inserting an entry will make another entry
    // dirty (on the hash bucket) so two entries are removed.
    ASSERT_EQ(num_entries - 1, actual);
  }

  // Release the backend before checking the files on disk.
  cache_.reset();
  cache_impl_ = NULL;

  ASSERT_TRUE(CheckCacheIntegrity(cache_path_, new_eviction_, mask));
  success_ = true;
}
| 1710 | |
| 1711 void DiskCacheBackendTest::BackendRecoverInsert() { | |
| 1712 // Tests with an empty cache. | |
| 1713 BackendTransaction("insert_empty1", 0, false); | |
| 1714 ASSERT_TRUE(success_) << "insert_empty1"; | |
| 1715 BackendTransaction("insert_empty2", 0, false); | |
| 1716 ASSERT_TRUE(success_) << "insert_empty2"; | |
| 1717 BackendTransaction("insert_empty3", 0, false); | |
| 1718 ASSERT_TRUE(success_) << "insert_empty3"; | |
| 1719 | |
| 1720 // Tests with one entry on the cache. | |
| 1721 BackendTransaction("insert_one1", 1, false); | |
| 1722 ASSERT_TRUE(success_) << "insert_one1"; | |
| 1723 BackendTransaction("insert_one2", 1, false); | |
| 1724 ASSERT_TRUE(success_) << "insert_one2"; | |
| 1725 BackendTransaction("insert_one3", 1, false); | |
| 1726 ASSERT_TRUE(success_) << "insert_one3"; | |
| 1727 | |
| 1728 // Tests with one hundred entries on the cache, tiny index. | |
| 1729 BackendTransaction("insert_load1", 100, true); | |
| 1730 ASSERT_TRUE(success_) << "insert_load1"; | |
| 1731 BackendTransaction("insert_load2", 100, true); | |
| 1732 ASSERT_TRUE(success_) << "insert_load2"; | |
| 1733 } | |
| 1734 | |
// Runs the insert-recovery scenarios with the default eviction algorithm.
TEST_F(DiskCacheBackendTest, RecoverInsert) {
  BackendRecoverInsert();
}

// Runs the insert-recovery scenarios with the new eviction algorithm.
TEST_F(DiskCacheBackendTest, NewEvictionRecoverInsert) {
  SetNewEviction();
  BackendRecoverInsert();
}
| 1743 | |
| 1744 void DiskCacheBackendTest::BackendRecoverRemove() { | |
| 1745 // Removing the only element. | |
| 1746 BackendTransaction("remove_one1", 0, false); | |
| 1747 ASSERT_TRUE(success_) << "remove_one1"; | |
| 1748 BackendTransaction("remove_one2", 0, false); | |
| 1749 ASSERT_TRUE(success_) << "remove_one2"; | |
| 1750 BackendTransaction("remove_one3", 0, false); | |
| 1751 ASSERT_TRUE(success_) << "remove_one3"; | |
| 1752 | |
| 1753 // Removing the head. | |
| 1754 BackendTransaction("remove_head1", 1, false); | |
| 1755 ASSERT_TRUE(success_) << "remove_head1"; | |
| 1756 BackendTransaction("remove_head2", 1, false); | |
| 1757 ASSERT_TRUE(success_) << "remove_head2"; | |
| 1758 BackendTransaction("remove_head3", 1, false); | |
| 1759 ASSERT_TRUE(success_) << "remove_head3"; | |
| 1760 | |
| 1761 // Removing the tail. | |
| 1762 BackendTransaction("remove_tail1", 1, false); | |
| 1763 ASSERT_TRUE(success_) << "remove_tail1"; | |
| 1764 BackendTransaction("remove_tail2", 1, false); | |
| 1765 ASSERT_TRUE(success_) << "remove_tail2"; | |
| 1766 BackendTransaction("remove_tail3", 1, false); | |
| 1767 ASSERT_TRUE(success_) << "remove_tail3"; | |
| 1768 | |
| 1769 // Removing with one hundred entries on the cache, tiny index. | |
| 1770 BackendTransaction("remove_load1", 100, true); | |
| 1771 ASSERT_TRUE(success_) << "remove_load1"; | |
| 1772 BackendTransaction("remove_load2", 100, true); | |
| 1773 ASSERT_TRUE(success_) << "remove_load2"; | |
| 1774 BackendTransaction("remove_load3", 100, true); | |
| 1775 ASSERT_TRUE(success_) << "remove_load3"; | |
| 1776 | |
| 1777 // This case cannot be reverted. | |
| 1778 BackendTransaction("remove_one4", 0, false); | |
| 1779 ASSERT_TRUE(success_) << "remove_one4"; | |
| 1780 BackendTransaction("remove_head4", 1, false); | |
| 1781 ASSERT_TRUE(success_) << "remove_head4"; | |
| 1782 } | |
| 1783 | |
// Runs the remove-recovery scenarios with the default eviction algorithm.
TEST_F(DiskCacheBackendTest, RecoverRemove) {
  BackendRecoverRemove();
}

// Runs the remove-recovery scenarios with the new eviction algorithm.
TEST_F(DiskCacheBackendTest, NewEvictionRecoverRemove) {
  SetNewEviction();
  BackendRecoverRemove();
}
| 1792 | |
// Loads a cache image that needs recovery while the maximum size is very
// small, so eviction has to run during the recovery pass.
void DiskCacheBackendTest::BackendRecoverWithEviction() {
  success_ = false;
  ASSERT_TRUE(CopyTestCache("insert_load1"));
  DisableFirstCleanup();

  SetMask(0xf);
  SetMaxSize(0x1000);

  // We should not crash here.
  InitCache();
  DisableIntegrityCheck();
}
| 1805 | |
// Recovery-with-eviction under the default eviction algorithm.
TEST_F(DiskCacheBackendTest, RecoverWithEviction) {
  BackendRecoverWithEviction();
}

// Recovery-with-eviction under the new eviction algorithm.
TEST_F(DiskCacheBackendTest, NewEvictionRecoverWithEviction) {
  SetNewEviction();
  BackendRecoverWithEviction();
}
| 1814 | |
| 1815 // Tests that the |BackendImpl| fails to start with the wrong cache version. | |
| 1816 TEST_F(DiskCacheTest, WrongVersion) { | |
| 1817 ASSERT_TRUE(CopyTestCache("wrong_version")); | |
| 1818 base::Thread cache_thread("CacheThread"); | |
| 1819 ASSERT_TRUE(cache_thread.StartWithOptions( | |
| 1820 base::Thread::Options(base::MessageLoop::TYPE_IO, 0))); | |
| 1821 net::TestCompletionCallback cb; | |
| 1822 | |
| 1823 scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl( | |
| 1824 cache_path_, cache_thread.message_loop_proxy().get(), NULL)); | |
| 1825 int rv = cache->Init(cb.callback()); | |
| 1826 ASSERT_EQ(net::ERR_FAILED, cb.GetResult(rv)); | |
| 1827 } | |
| 1828 | |
// Entropy provider that returns a fixed 0.5 for every trial, letting tests
// deterministically pin a field trial into a chosen group.
class BadEntropyProvider : public base::FieldTrial::EntropyProvider {
 public:
  virtual ~BadEntropyProvider() {}

  // Ignores both the trial name and the seed; always 0.5.
  virtual double GetEntropyForTrial(const std::string& trial_name,
                                    uint32 randomization_seed) const OVERRIDE {
    return 0.5;
  }
};
| 1838 | |
| 1839 // Tests that the disk cache successfully joins the control group, dropping the | |
| 1840 // existing cache in favour of a new empty cache. | |
| 1841 TEST_F(DiskCacheTest, SimpleCacheControlJoin) { | |
| 1842 base::Thread cache_thread("CacheThread"); | |
| 1843 ASSERT_TRUE(cache_thread.StartWithOptions( | |
| 1844 base::Thread::Options(base::MessageLoop::TYPE_IO, 0))); | |
| 1845 | |
| 1846 scoped_ptr<disk_cache::BackendImpl> cache = | |
| 1847 CreateExistingEntryCache(cache_thread, cache_path_); | |
| 1848 ASSERT_TRUE(cache.get()); | |
| 1849 cache.reset(); | |
| 1850 | |
| 1851 // Instantiate the SimpleCacheTrial, forcing this run into the | |
| 1852 // ExperimentControl group. | |
| 1853 base::FieldTrialList field_trial_list(new BadEntropyProvider()); | |
| 1854 base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial", | |
| 1855 "ExperimentControl"); | |
| 1856 net::TestCompletionCallback cb; | |
| 1857 scoped_ptr<disk_cache::Backend> base_cache; | |
| 1858 int rv = | |
| 1859 disk_cache::CreateCacheBackend(net::DISK_CACHE, | |
| 1860 net::CACHE_BACKEND_BLOCKFILE, | |
| 1861 cache_path_, | |
| 1862 0, | |
| 1863 true, | |
| 1864 cache_thread.message_loop_proxy().get(), | |
| 1865 NULL, | |
| 1866 &base_cache, | |
| 1867 cb.callback()); | |
| 1868 ASSERT_EQ(net::OK, cb.GetResult(rv)); | |
| 1869 EXPECT_EQ(0, base_cache->GetEntryCount()); | |
| 1870 } | |
| 1871 | |
| 1872 // Tests that the disk cache can restart in the control group preserving | |
| 1873 // existing entries. | |
| 1874 TEST_F(DiskCacheTest, SimpleCacheControlRestart) { | |
| 1875 // Instantiate the SimpleCacheTrial, forcing this run into the | |
| 1876 // ExperimentControl group. | |
| 1877 base::FieldTrialList field_trial_list(new BadEntropyProvider()); | |
| 1878 base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial", | |
| 1879 "ExperimentControl"); | |
| 1880 | |
| 1881 base::Thread cache_thread("CacheThread"); | |
| 1882 ASSERT_TRUE(cache_thread.StartWithOptions( | |
| 1883 base::Thread::Options(base::MessageLoop::TYPE_IO, 0))); | |
| 1884 | |
| 1885 scoped_ptr<disk_cache::BackendImpl> cache = | |
| 1886 CreateExistingEntryCache(cache_thread, cache_path_); | |
| 1887 ASSERT_TRUE(cache.get()); | |
| 1888 | |
| 1889 net::TestCompletionCallback cb; | |
| 1890 | |
| 1891 const int kRestartCount = 5; | |
| 1892 for (int i = 0; i < kRestartCount; ++i) { | |
| 1893 cache.reset(new disk_cache::BackendImpl( | |
| 1894 cache_path_, cache_thread.message_loop_proxy(), NULL)); | |
| 1895 int rv = cache->Init(cb.callback()); | |
| 1896 ASSERT_EQ(net::OK, cb.GetResult(rv)); | |
| 1897 EXPECT_EQ(1, cache->GetEntryCount()); | |
| 1898 | |
| 1899 disk_cache::Entry* entry = NULL; | |
| 1900 rv = cache->OpenEntry(kExistingEntryKey, &entry, cb.callback()); | |
| 1901 EXPECT_EQ(net::OK, cb.GetResult(rv)); | |
| 1902 EXPECT_TRUE(entry); | |
| 1903 entry->Close(); | |
| 1904 } | |
| 1905 } | |
| 1906 | |
| 1907 // Tests that the disk cache can leave the control group preserving existing | |
| 1908 // entries. | |
| 1909 TEST_F(DiskCacheTest, SimpleCacheControlLeave) { | |
| 1910 base::Thread cache_thread("CacheThread"); | |
| 1911 ASSERT_TRUE(cache_thread.StartWithOptions( | |
| 1912 base::Thread::Options(base::MessageLoop::TYPE_IO, 0))); | |
| 1913 | |
| 1914 { | |
| 1915 // Instantiate the SimpleCacheTrial, forcing this run into the | |
| 1916 // ExperimentControl group. | |
| 1917 base::FieldTrialList field_trial_list(new BadEntropyProvider()); | |
| 1918 base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial", | |
| 1919 "ExperimentControl"); | |
| 1920 | |
| 1921 scoped_ptr<disk_cache::BackendImpl> cache = | |
| 1922 CreateExistingEntryCache(cache_thread, cache_path_); | |
| 1923 ASSERT_TRUE(cache.get()); | |
| 1924 } | |
| 1925 | |
| 1926 // Instantiate the SimpleCacheTrial, forcing this run into the | |
| 1927 // ExperimentNo group. | |
| 1928 base::FieldTrialList field_trial_list(new BadEntropyProvider()); | |
| 1929 base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial", "ExperimentNo"); | |
| 1930 net::TestCompletionCallback cb; | |
| 1931 | |
| 1932 const int kRestartCount = 5; | |
| 1933 for (int i = 0; i < kRestartCount; ++i) { | |
| 1934 scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl( | |
| 1935 cache_path_, cache_thread.message_loop_proxy(), NULL)); | |
| 1936 int rv = cache->Init(cb.callback()); | |
| 1937 ASSERT_EQ(net::OK, cb.GetResult(rv)); | |
| 1938 EXPECT_EQ(1, cache->GetEntryCount()); | |
| 1939 | |
| 1940 disk_cache::Entry* entry = NULL; | |
| 1941 rv = cache->OpenEntry(kExistingEntryKey, &entry, cb.callback()); | |
| 1942 EXPECT_EQ(net::OK, cb.GetResult(rv)); | |
| 1943 EXPECT_TRUE(entry); | |
| 1944 entry->Close(); | |
| 1945 } | |
| 1946 } | |
| 1947 | |
| 1948 // Tests that the cache is properly restarted on recovery error. | |
| 1949 TEST_F(DiskCacheBackendTest, DeleteOld) { | |
| 1950 ASSERT_TRUE(CopyTestCache("wrong_version")); | |
| 1951 SetNewEviction(); | |
| 1952 base::Thread cache_thread("CacheThread"); | |
| 1953 ASSERT_TRUE(cache_thread.StartWithOptions( | |
| 1954 base::Thread::Options(base::MessageLoop::TYPE_IO, 0))); | |
| 1955 | |
| 1956 net::TestCompletionCallback cb; | |
| 1957 bool prev = base::ThreadRestrictions::SetIOAllowed(false); | |
| 1958 base::FilePath path(cache_path_); | |
| 1959 int rv = | |
| 1960 disk_cache::CreateCacheBackend(net::DISK_CACHE, | |
| 1961 net::CACHE_BACKEND_BLOCKFILE, | |
| 1962 path, | |
| 1963 0, | |
| 1964 true, | |
| 1965 cache_thread.message_loop_proxy().get(), | |
| 1966 NULL, | |
| 1967 &cache_, | |
| 1968 cb.callback()); | |
| 1969 path.clear(); // Make sure path was captured by the previous call. | |
| 1970 ASSERT_EQ(net::OK, cb.GetResult(rv)); | |
| 1971 base::ThreadRestrictions::SetIOAllowed(prev); | |
| 1972 cache_.reset(); | |
| 1973 EXPECT_TRUE(CheckCacheIntegrity(cache_path_, new_eviction_, mask_)); | |
| 1974 } | |
| 1975 | |
// We want to be able to deal with messed up entries on disk.
void DiskCacheBackendTest::BackendInvalidEntry2() {
  ASSERT_TRUE(CopyTestCache("bad_entry"));
  DisableFirstCleanup();
  InitCache();

  disk_cache::Entry *entry1, *entry2;
  // The good entry still opens; the damaged one must be rejected.
  ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1));
  EXPECT_NE(net::OK, OpenEntry("some other key", &entry2));
  entry1->Close();

  // CheckCacheIntegrity will fail at this point.
  DisableIntegrityCheck();
}

// Damaged-entry handling under the default eviction algorithm.
TEST_F(DiskCacheBackendTest, InvalidEntry2) {
  BackendInvalidEntry2();
}

// Damaged-entry handling under the new eviction algorithm.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry2) {
  SetNewEviction();
  BackendInvalidEntry2();
}
| 1999 | |
| 2000 // Tests that we don't crash or hang when enumerating this cache. | |
| 2001 void DiskCacheBackendTest::BackendInvalidEntry3() { | |
| 2002 SetMask(0x1); // 2-entry table. | |
| 2003 SetMaxSize(0x3000); // 12 kB. | |
| 2004 DisableFirstCleanup(); | |
| 2005 InitCache(); | |
| 2006 | |
| 2007 disk_cache::Entry* entry; | |
| 2008 void* iter = NULL; | |
| 2009 while (OpenNextEntry(&iter, &entry) == net::OK) { | |
| 2010 entry->Close(); | |
| 2011 } | |
| 2012 } | |
| 2013 | |
// Enumerates a cache seeded with a dirty entry, default eviction.
TEST_F(DiskCacheBackendTest, InvalidEntry3) {
  ASSERT_TRUE(CopyTestCache("dirty_entry3"));
  BackendInvalidEntry3();
}

// Same enumeration with the new eviction algorithm and its own test image.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry3) {
  ASSERT_TRUE(CopyTestCache("dirty_entry4"));
  SetNewEviction();
  BackendInvalidEntry3();
  DisableIntegrityCheck();
}
| 2025 | |
// Test that we handle a dirty entry on the LRU list, already replaced with
// the same key, and with hash collisions.
TEST_F(DiskCacheBackendTest, InvalidEntry4) {
  ASSERT_TRUE(CopyTestCache("dirty_entry3"));
  SetMask(0x1);  // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();
  InitCache();

  // Eviction must tolerate the stale entry on the list.
  TrimForTest(false);
}
| 2037 | |
// Test that we handle a dirty entry on the deleted list, already replaced with
// the same key, and with hash collisions.
TEST_F(DiskCacheBackendTest, InvalidEntry5) {
  ASSERT_TRUE(CopyTestCache("dirty_entry4"));
  SetNewEviction();
  SetMask(0x1);  // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();
  InitCache();

  // Trimming the deleted list must tolerate the stale entry.
  TrimDeletedListForTest(false);
}
| 2050 | |
// Test that eviction does not resurrect a deleted entry reachable through a
// hash collision chain.
TEST_F(DiskCacheBackendTest, InvalidEntry6) {
  ASSERT_TRUE(CopyTestCache("dirty_entry5"));
  SetMask(0x1);  // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();
  InitCache();

  // There is a dirty entry (but marked as clean) at the end, pointing to a
  // deleted entry through the hash collision list. We should not re-insert the
  // deleted entry into the index table.

  TrimForTest(false);
  // The cache should be clean (as detected by CheckCacheIntegrity).
}
| 2065 | |
// Tests that we don't hang when there is a loop on the hash collision list.
// The test cache could be a result of bug 69135.
TEST_F(DiskCacheBackendTest, BadNextEntry1) {
  ASSERT_TRUE(CopyTestCache("list_loop2"));
  SetMask(0x1);  // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();
  InitCache();

  // The second entry points at itself, and the first entry is not accessible
  // through the index, but it is at the head of the LRU.

  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry("The first key", &entry));
  entry->Close();

  // Two trims: the looping collision list must not make eviction spin.
  TrimForTest(false);
  TrimForTest(false);
  ASSERT_EQ(net::OK, OpenEntry("The first key", &entry));
  entry->Close();
  EXPECT_EQ(1, cache_->GetEntryCount());
}
| 2088 | |
// Tests that we don't hang when there is a loop on the hash collision list.
// The test cache could be a result of bug 69135.
TEST_F(DiskCacheBackendTest, BadNextEntry2) {
  ASSERT_TRUE(CopyTestCache("list_loop3"));
  SetMask(0x1);  // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();
  InitCache();

  // There is a wide loop of 5 entries.

  // A miss must terminate even though the collision chain never ends.
  disk_cache::Entry* entry;
  ASSERT_NE(net::OK, OpenEntry("Not present key", &entry));
}
| 2103 | |
// Dooming a good entry next to a dirty one must keep the rankings list sane.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry6) {
  ASSERT_TRUE(CopyTestCache("bad_rankings3"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();

  // The second entry is dirty, but removing it should not corrupt the list.
  disk_cache::Entry* entry;
  ASSERT_NE(net::OK, OpenEntry("the second key", &entry));
  ASSERT_EQ(net::OK, OpenEntry("the first key", &entry));

  // This should not delete the cache.
  entry->Doom();
  FlushQueueForTest();
  entry->Close();

  // The remaining entry is still reachable afterwards.
  ASSERT_EQ(net::OK, OpenEntry("some other key", &entry));
  entry->Close();
}
| 2123 | |
| 2124 // Tests handling of corrupt entries by keeping the rankings node around, with | |
| 2125 // a fatal failure. | |
| 2126 void DiskCacheBackendTest::BackendInvalidEntry7() { | |
| 2127 const int kSize = 0x3000; // 12 kB. | |
| 2128 SetMaxSize(kSize * 10); | |
| 2129 InitCache(); | |
| 2130 | |
| 2131 std::string first("some key"); | |
| 2132 std::string second("something else"); | |
| 2133 disk_cache::Entry* entry; | |
| 2134 ASSERT_EQ(net::OK, CreateEntry(first, &entry)); | |
| 2135 entry->Close(); | |
| 2136 ASSERT_EQ(net::OK, CreateEntry(second, &entry)); | |
| 2137 | |
| 2138 // Corrupt this entry. | |
| 2139 disk_cache::EntryImpl* entry_impl = | |
| 2140 static_cast<disk_cache::EntryImpl*>(entry); | |
| 2141 | |
| 2142 entry_impl->rankings()->Data()->next = 0; | |
| 2143 entry_impl->rankings()->Store(); | |
| 2144 entry->Close(); | |
| 2145 FlushQueueForTest(); | |
| 2146 EXPECT_EQ(2, cache_->GetEntryCount()); | |
| 2147 | |
| 2148 // This should detect the bad entry. | |
| 2149 EXPECT_NE(net::OK, OpenEntry(second, &entry)); | |
| 2150 EXPECT_EQ(1, cache_->GetEntryCount()); | |
| 2151 | |
| 2152 // We should delete the cache. The list still has a corrupt node. | |
| 2153 void* iter = NULL; | |
| 2154 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry)); | |
| 2155 FlushQueueForTest(); | |
| 2156 EXPECT_EQ(0, cache_->GetEntryCount()); | |
| 2157 } | |
| 2158 | |
// Fatal rankings corruption under the default eviction algorithm.
TEST_F(DiskCacheBackendTest, InvalidEntry7) {
  BackendInvalidEntry7();
}

// Fatal rankings corruption under the new eviction algorithm.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry7) {
  SetNewEviction();
  BackendInvalidEntry7();
}
| 2167 | |
| 2168 // Tests handling of corrupt entries by keeping the rankings node around, with | |
| 2169 // a non fatal failure. | |
| 2170 void DiskCacheBackendTest::BackendInvalidEntry8() { | |
| 2171 const int kSize = 0x3000; // 12 kB | |
| 2172 SetMaxSize(kSize * 10); | |
| 2173 InitCache(); | |
| 2174 | |
| 2175 std::string first("some key"); | |
| 2176 std::string second("something else"); | |
| 2177 disk_cache::Entry* entry; | |
| 2178 ASSERT_EQ(net::OK, CreateEntry(first, &entry)); | |
| 2179 entry->Close(); | |
| 2180 ASSERT_EQ(net::OK, CreateEntry(second, &entry)); | |
| 2181 | |
| 2182 // Corrupt this entry. | |
| 2183 disk_cache::EntryImpl* entry_impl = | |
| 2184 static_cast<disk_cache::EntryImpl*>(entry); | |
| 2185 | |
| 2186 entry_impl->rankings()->Data()->contents = 0; | |
| 2187 entry_impl->rankings()->Store(); | |
| 2188 entry->Close(); | |
| 2189 FlushQueueForTest(); | |
| 2190 EXPECT_EQ(2, cache_->GetEntryCount()); | |
| 2191 | |
| 2192 // This should detect the bad entry. | |
| 2193 EXPECT_NE(net::OK, OpenEntry(second, &entry)); | |
| 2194 EXPECT_EQ(1, cache_->GetEntryCount()); | |
| 2195 | |
| 2196 // We should not delete the cache. | |
| 2197 void* iter = NULL; | |
| 2198 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry)); | |
| 2199 entry->Close(); | |
| 2200 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry)); | |
| 2201 EXPECT_EQ(1, cache_->GetEntryCount()); | |
| 2202 } | |
| 2203 | |
// Non-fatal rankings corruption under the default eviction algorithm.
TEST_F(DiskCacheBackendTest, InvalidEntry8) {
  BackendInvalidEntry8();
}

// Non-fatal rankings corruption under the new eviction algorithm.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry8) {
  SetNewEviction();
  BackendInvalidEntry8();
}
| 2212 | |
// Tests handling of corrupt entries detected by enumerations. Note that these
// tests (xx9 to xx11) are basically just going through slightly different
// codepaths so they are tightly coupled with the code, but that is better than
// not testing error handling code.
void DiskCacheBackendTest::BackendInvalidEntry9(bool eviction) {
  const int kSize = 0x3000;  // 12 kB.
  SetMaxSize(kSize * 10);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(first, &entry));
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry(second, &entry));

  // Corrupt this entry.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);

  // Stamp an invalid state value directly onto the on-disk entry record.
  entry_impl->entry()->Data()->state = 0xbad;
  entry_impl->entry()->Store();
  entry->Close();
  FlushQueueForTest();
  EXPECT_EQ(2, cache_->GetEntryCount());

  if (eviction) {
    TrimForTest(false);
    EXPECT_EQ(1, cache_->GetEntryCount());
    TrimForTest(false);
    EXPECT_EQ(1, cache_->GetEntryCount());
  } else {
    // We should detect the problem through the list, but we should not delete
    // the entry, just fail the iteration.
    void* iter = NULL;
    EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));

    // Now a full iteration will work, and return one entry.
    ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
    entry->Close();
    EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));

    // This should detect what's left of the bad entry.
    EXPECT_NE(net::OK, OpenEntry(second, &entry));
    EXPECT_EQ(2, cache_->GetEntryCount());
  }
  DisableIntegrityCheck();
}
| 2261 | |
// Enumeration path, default eviction.
TEST_F(DiskCacheBackendTest, InvalidEntry9) {
  BackendInvalidEntry9(false);
}

// Enumeration path, new eviction.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry9) {
  SetNewEviction();
  BackendInvalidEntry9(false);
}

// Eviction path, default eviction.
TEST_F(DiskCacheBackendTest, TrimInvalidEntry9) {
  BackendInvalidEntry9(true);
}

// Eviction path, new eviction.
TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry9) {
  SetNewEviction();
  BackendInvalidEntry9(true);
}
| 2279 | |
// Tests handling of corrupt entries detected by enumerations.
void DiskCacheBackendTest::BackendInvalidEntry10(bool eviction) {
  const int kSize = 0x3000;  // 12 kB.
  SetMaxSize(kSize * 10);
  SetNewEviction();
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(first, &entry));
  entry->Close();
  // Reopening and writing moves |first| to another list (see diagram below).
  ASSERT_EQ(net::OK, OpenEntry(first, &entry));
  EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry(second, &entry));

  // Corrupt this entry.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);

  // Stamp an invalid state value directly onto the on-disk entry record.
  entry_impl->entry()->Data()->state = 0xbad;
  entry_impl->entry()->Store();
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry("third", &entry));
  entry->Close();
  EXPECT_EQ(3, cache_->GetEntryCount());

  // We have:
  // List 0: third -> second (bad).
  // List 1: first.

  if (eviction) {
    // Detection order: second -> first -> third.
    TrimForTest(false);
    EXPECT_EQ(3, cache_->GetEntryCount());
    TrimForTest(false);
    EXPECT_EQ(2, cache_->GetEntryCount());
    TrimForTest(false);
    EXPECT_EQ(1, cache_->GetEntryCount());
  } else {
    // Detection order: third -> second -> first.
    // We should detect the problem through the list, but we should not delete
    // the entry.
    void* iter = NULL;
    ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
    entry->Close();
    ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
    EXPECT_EQ(first, entry->GetKey());
    entry->Close();
    EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
  }
  DisableIntegrityCheck();
}
| 2334 | |
// Enumeration path.
TEST_F(DiskCacheBackendTest, InvalidEntry10) {
  BackendInvalidEntry10(false);
}

// Eviction path.
TEST_F(DiskCacheBackendTest, TrimInvalidEntry10) {
  BackendInvalidEntry10(true);
}
| 2342 | |
// Tests handling of corrupt entries detected by enumerations.
void DiskCacheBackendTest::BackendInvalidEntry11(bool eviction) {
  const int kSize = 0x3000;  // 12 kB.
  SetMaxSize(kSize * 10);
  SetNewEviction();
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(first, &entry));
  entry->Close();
  // Reopening and writing moves an entry to another list (see diagram below).
  ASSERT_EQ(net::OK, OpenEntry(first, &entry));
  EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry(second, &entry));
  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry(second, &entry));
  EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));

  // Corrupt this entry.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);

  // Stamp an invalid state value directly onto the on-disk entry record.
  entry_impl->entry()->Data()->state = 0xbad;
  entry_impl->entry()->Store();
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry("third", &entry));
  entry->Close();
  FlushQueueForTest();
  EXPECT_EQ(3, cache_->GetEntryCount());

  // We have:
  // List 0: third.
  // List 1: second (bad) -> first.

  if (eviction) {
    // Detection order: third -> first -> second.
    TrimForTest(false);
    EXPECT_EQ(2, cache_->GetEntryCount());
    TrimForTest(false);
    EXPECT_EQ(1, cache_->GetEntryCount());
    TrimForTest(false);
    EXPECT_EQ(1, cache_->GetEntryCount());
  } else {
    // Detection order: third -> second.
    // We should detect the problem through the list, but we should not delete
    // the entry, just fail the iteration.
    void* iter = NULL;
    ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
    entry->Close();
    EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));

    // Now a full iteration will work, and return two entries.
    ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
    entry->Close();
    ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
    entry->Close();
    EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
  }
  DisableIntegrityCheck();
}
| 2405 | |
// Enumeration path.
TEST_F(DiskCacheBackendTest, InvalidEntry11) {
  BackendInvalidEntry11(false);
}

// Eviction path.
TEST_F(DiskCacheBackendTest, TrimInvalidEntry11) {
  BackendInvalidEntry11(true);
}
| 2413 | |
// Tests handling of corrupt entries in the middle of a long eviction run.
void DiskCacheBackendTest::BackendTrimInvalidEntry12() {
  const int kSize = 0x3000;  // 12 kB
  SetMaxSize(kSize * 10);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(first, &entry));
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry(second, &entry));

  // Corrupt this entry.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);

  // Stamp an invalid state value directly onto the on-disk entry record.
  entry_impl->entry()->Data()->state = 0xbad;
  entry_impl->entry()->Store();
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry("third", &entry));
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry("fourth", &entry));
  // Evict everything at once; the run must survive the corrupt entry and
  // leave only the entry that is still open.
  TrimForTest(true);
  EXPECT_EQ(1, cache_->GetEntryCount());
  entry->Close();
  DisableIntegrityCheck();
}
| 2442 | |
// Long eviction run with corruption, default eviction algorithm.
TEST_F(DiskCacheBackendTest, TrimInvalidEntry12) {
  BackendTrimInvalidEntry12();
}

// Long eviction run with corruption, new eviction algorithm.
TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry12) {
  SetNewEviction();
  BackendTrimInvalidEntry12();
}
| 2451 | |
// We want to be able to deal with messed up entries on disk.
void DiskCacheBackendTest::BackendInvalidRankings2() {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  InitCache();

  disk_cache::Entry *entry1, *entry2;
  // The entry with the damaged rankings data must be rejected; the other one
  // still opens.
  EXPECT_NE(net::OK, OpenEntry("the first key", &entry1));
  ASSERT_EQ(net::OK, OpenEntry("some other key", &entry2));
  entry2->Close();

  // CheckCacheIntegrity will fail at this point.
  DisableIntegrityCheck();
}

// Bad-rankings handling under the default eviction algorithm.
TEST_F(DiskCacheBackendTest, InvalidRankings2) {
  BackendInvalidRankings2();
}

// Bad-rankings handling under the new eviction algorithm.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankings2) {
  SetNewEviction();
  BackendInvalidRankings2();
}
| 2475 | |
// If the LRU is corrupt, we delete the cache.
void DiskCacheBackendTest::BackendInvalidRankings() {
  disk_cache::Entry* entry;
  void* iter = NULL;
  ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
  entry->Close();
  EXPECT_EQ(2, cache_->GetEntryCount());

  // Hitting the corrupt node stops the enumeration and wipes the cache.
  EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
  FlushQueueForTest();  // Allow the restart to finish.
  EXPECT_EQ(0, cache_->GetEntryCount());
}
| 2488 | |
// Success cases: the backend restarts cleanly after finding the corruption.
TEST_F(DiskCacheBackendTest, InvalidRankingsSuccess) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  InitCache();
  BackendInvalidRankings();
}

TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankingsSuccess) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  BackendInvalidRankings();
}

// Failure cases: reinitialization itself is forced to fail via SetTestMode().
TEST_F(DiskCacheBackendTest, InvalidRankingsFailure) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  InitCache();
  SetTestMode();  // Fail cache reinitialization.
  BackendInvalidRankings();
}

TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankingsFailure) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  SetTestMode();  // Fail cache reinitialization.
  BackendInvalidRankings();
}
| 2520 | |
// If the LRU is corrupt and we have open entries, we disable the cache.
void DiskCacheBackendTest::BackendDisable() {
  disk_cache::Entry *entry1, *entry2;
  void* iter = NULL;
  ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry1));

  // Hitting the corruption while entry1 is still open disables the cache:
  // the enumeration fails, the reported count drops to zero, and new entries
  // cannot be created.
  EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry2));
  EXPECT_EQ(0, cache_->GetEntryCount());
  EXPECT_NE(net::OK, CreateEntry("Something new", &entry2));

  entry1->Close();
  FlushQueueForTest();  // Flushing the Close posts a task to restart the cache.
  FlushQueueForTest();  // This one actually allows that task to complete.

  EXPECT_EQ(0, cache_->GetEntryCount());
}
| 2537 | |
// The four wrappers below run BackendDisable over the "bad_rankings" fixture,
// with and without new eviction, and with reinitialization allowed to
// succeed or forced (via SetTestMode) to fail.
TEST_F(DiskCacheBackendTest, DisableSuccess) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  InitCache();
  BackendDisable();
}

TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  BackendDisable();
}

TEST_F(DiskCacheBackendTest, DisableFailure) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  InitCache();
  SetTestMode();  // Fail cache reinitialization.
  BackendDisable();
}

TEST_F(DiskCacheBackendTest, NewEvictionDisableFailure) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  SetTestMode();  // Fail cache reinitialization.
  BackendDisable();
}
| 2569 | |
| 2570 // This is another type of corruption on the LRU; disable the cache. | |
| 2571 void DiskCacheBackendTest::BackendDisable2() { | |
| 2572 EXPECT_EQ(8, cache_->GetEntryCount()); | |
| 2573 | |
| 2574 disk_cache::Entry* entry; | |
| 2575 void* iter = NULL; | |
| 2576 int count = 0; | |
| 2577 while (OpenNextEntry(&iter, &entry) == net::OK) { | |
| 2578 ASSERT_TRUE(NULL != entry); | |
| 2579 entry->Close(); | |
| 2580 count++; | |
| 2581 ASSERT_LT(count, 9); | |
| 2582 }; | |
| 2583 | |
| 2584 FlushQueueForTest(); | |
| 2585 EXPECT_EQ(0, cache_->GetEntryCount()); | |
| 2586 } | |
| 2587 | |
// The four wrappers below run BackendDisable2 over the "list_loop" fixture,
// with and without new eviction, and with reinitialization allowed to
// succeed or forced (via SetTestMode) to fail.
TEST_F(DiskCacheBackendTest, DisableSuccess2) {
  ASSERT_TRUE(CopyTestCache("list_loop"));
  DisableFirstCleanup();
  InitCache();
  BackendDisable2();
}

TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess2) {
  ASSERT_TRUE(CopyTestCache("list_loop"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  BackendDisable2();
}

TEST_F(DiskCacheBackendTest, DisableFailure2) {
  ASSERT_TRUE(CopyTestCache("list_loop"));
  DisableFirstCleanup();
  InitCache();
  SetTestMode();  // Fail cache reinitialization.
  BackendDisable2();
}

TEST_F(DiskCacheBackendTest, NewEvictionDisableFailure2) {
  ASSERT_TRUE(CopyTestCache("list_loop"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  SetTestMode();  // Fail cache reinitialization.
  BackendDisable2();
}
| 2619 | |
// If the index size changes when we disable the cache, we should not crash.
void DiskCacheBackendTest::BackendDisable3() {
  disk_cache::Entry *entry1, *entry2;
  void* iter = NULL;
  EXPECT_EQ(2, cache_->GetEntryCount());
  ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry1));
  entry1->Close();

  // Hitting the corruption fails the enumeration; let the pending restart
  // work run.
  EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry2));
  FlushQueueForTest();

  // After that, the cache is usable again.
  ASSERT_EQ(net::OK, CreateEntry("Something new", &entry2));
  entry2->Close();

  EXPECT_EQ(1, cache_->GetEntryCount());
}
| 2636 | |
// Run BackendDisable3 over the "bad_rankings2" fixture with an explicit max
// size, with and without new eviction.
TEST_F(DiskCacheBackendTest, DisableSuccess3) {
  ASSERT_TRUE(CopyTestCache("bad_rankings2"));
  DisableFirstCleanup();
  SetMaxSize(20 * 1024 * 1024);
  InitCache();
  BackendDisable3();
}

TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess3) {
  ASSERT_TRUE(CopyTestCache("bad_rankings2"));
  DisableFirstCleanup();
  SetMaxSize(20 * 1024 * 1024);
  SetNewEviction();
  InitCache();
  BackendDisable3();
}
| 2653 | |
// If we disable the cache, already open entries should work as far as possible.
void DiskCacheBackendTest::BackendDisable4() {
  disk_cache::Entry *entry1, *entry2, *entry3, *entry4;
  void* iter = NULL;
  ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry1));

  // Create two fresh entries before hitting the corruption: one with a
  // ~2KB key and one with a ~20KB key (random printable data, NUL-terminated).
  char key2[2000];
  char key3[20000];
  CacheTestFillBuffer(key2, sizeof(key2), true);
  CacheTestFillBuffer(key3, sizeof(key3), true);
  key2[sizeof(key2) - 1] = '\0';
  key3[sizeof(key3) - 1] = '\0';
  ASSERT_EQ(net::OK, CreateEntry(key2, &entry2));
  ASSERT_EQ(net::OK, CreateEntry(key3, &entry3));

  const int kBufSize = 20000;
  scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kBufSize));
  memset(buf->data(), 0, kBufSize);
  EXPECT_EQ(100, WriteData(entry2, 0, 0, buf.get(), 100, false));
  EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false));

  // This line should disable the cache but not delete it.
  EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry4));
  EXPECT_EQ(0, cache_->GetEntryCount());

  EXPECT_NE(net::OK, CreateEntry("cache is disabled", &entry4));

  // The already-open entries must still accept reads and writes...
  EXPECT_EQ(100, ReadData(entry2, 0, 0, buf.get(), 100));
  EXPECT_EQ(100, WriteData(entry2, 0, 0, buf.get(), 100, false));
  EXPECT_EQ(100, WriteData(entry2, 1, 0, buf.get(), 100, false));

  EXPECT_EQ(kBufSize, ReadData(entry3, 0, 0, buf.get(), kBufSize));
  EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false));
  EXPECT_EQ(kBufSize, WriteData(entry3, 1, 0, buf.get(), kBufSize, false));

  // ...and still report their full keys.
  std::string key = entry2->GetKey();
  EXPECT_EQ(sizeof(key2) - 1, key.size());
  key = entry3->GetKey();
  EXPECT_EQ(sizeof(key3) - 1, key.size());

  entry1->Close();
  entry2->Close();
  entry3->Close();
  FlushQueueForTest();  // Flushing the Close posts a task to restart the cache.
  FlushQueueForTest();  // This one actually allows that task to complete.

  EXPECT_EQ(0, cache_->GetEntryCount());
}
| 2702 | |
// Run BackendDisable4 over the "bad_rankings" fixture, with and without new
// eviction.
TEST_F(DiskCacheBackendTest, DisableSuccess4) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  InitCache();
  BackendDisable4();
}

TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess4) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  BackendDisable4();
}
| 2717 | |
// Creates a backend directly (bypassing the fixture's cache) and spins the
// message loop so the backend's usage-stats timer gets a chance to run.
TEST_F(DiskCacheTest, Backend_UsageStatsTimer) {
  MessageLoopHelper helper;

  ASSERT_TRUE(CleanupCacheDir());
  scoped_ptr<disk_cache::BackendImpl> cache;
  cache.reset(new disk_cache::BackendImpl(
      cache_path_, base::MessageLoopProxy::current().get(), NULL));
  ASSERT_TRUE(NULL != cache.get());
  cache->SetUnitTestMode();
  ASSERT_EQ(net::OK, cache->SyncInit());

  // Wait for a callback that never comes... about 2 secs :). The message loop
  // has to run to allow invocation of the usage timer.
  helper.WaitUntilCacheIoFinished(1);
}
| 2733 | |
// If initialization fails (wrong on-disk cache version), the backend should
// not create its stats timer.
TEST_F(DiskCacheBackendTest, TimerNotCreated) {
  ASSERT_TRUE(CopyTestCache("wrong_version"));

  scoped_ptr<disk_cache::BackendImpl> cache;
  cache.reset(new disk_cache::BackendImpl(
      cache_path_, base::MessageLoopProxy::current().get(), NULL));
  ASSERT_TRUE(NULL != cache.get());
  cache->SetUnitTestMode();
  ASSERT_NE(net::OK, cache->SyncInit());

  ASSERT_TRUE(NULL == cache->GetTimerForTest());

  DisableIntegrityCheck();
}
| 2748 | |
// Verifies that usage statistics are recorded and survive a backend restart.
TEST_F(DiskCacheBackendTest, Backend_UsageStats) {
  InitCache();
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry("key", &entry));
  entry->Close();
  FlushQueueForTest();

  disk_cache::StatsItems stats;
  cache_->GetStats(&stats);
  EXPECT_FALSE(stats.empty());

  // The single successful create above should show up as exactly one
  // "Create hit" stat (values are reported in hex).
  disk_cache::StatsItems::value_type hits("Create hit", "0x1");
  EXPECT_EQ(1, std::count(stats.begin(), stats.end(), hits));

  cache_.reset();

  // Now open the cache and verify that the stats are still there.
  DisableFirstCleanup();
  InitCache();
  EXPECT_EQ(1, cache_->GetEntryCount());

  stats.clear();
  cache_->GetStats(&stats);
  EXPECT_FALSE(stats.empty());

  EXPECT_EQ(1, std::count(stats.begin(), stats.end(), hits));
}
| 2776 | |
// Exercises DoomAllEntries(): with a mix of open and closed entries, with all
// references released, and on an already-empty cache. Also checks that
// dooming an entry that DoomAllEntries already doomed is harmless.
void DiskCacheBackendTest::BackendDoomAll() {
  InitCache();

  disk_cache::Entry *entry1, *entry2;
  ASSERT_EQ(net::OK, CreateEntry("first", &entry1));
  ASSERT_EQ(net::OK, CreateEntry("second", &entry2));
  entry1->Close();
  entry2->Close();

  // These two stay open across the DoomAllEntries() call below.
  ASSERT_EQ(net::OK, CreateEntry("third", &entry1));
  ASSERT_EQ(net::OK, CreateEntry("fourth", &entry2));

  ASSERT_EQ(4, cache_->GetEntryCount());
  EXPECT_EQ(net::OK, DoomAllEntries());
  ASSERT_EQ(0, cache_->GetEntryCount());

  // We should stop posting tasks at some point (if we post any).
  base::MessageLoop::current()->RunUntilIdle();

  // The doomed names are gone; recreating them must work.
  disk_cache::Entry *entry3, *entry4;
  EXPECT_NE(net::OK, OpenEntry("third", &entry3));
  ASSERT_EQ(net::OK, CreateEntry("third", &entry3));
  ASSERT_EQ(net::OK, CreateEntry("fourth", &entry4));

  EXPECT_EQ(net::OK, DoomAllEntries());
  ASSERT_EQ(0, cache_->GetEntryCount());

  entry1->Close();
  entry2->Close();
  entry3->Doom();  // The entry should be already doomed, but this must work.
  entry3->Close();
  entry4->Close();

  // Now try with all references released.
  ASSERT_EQ(net::OK, CreateEntry("third", &entry1));
  ASSERT_EQ(net::OK, CreateEntry("fourth", &entry2));
  entry1->Close();
  entry2->Close();

  ASSERT_EQ(2, cache_->GetEntryCount());
  EXPECT_EQ(net::OK, DoomAllEntries());
  ASSERT_EQ(0, cache_->GetEntryCount());

  // Dooming an already-empty cache must also succeed.
  EXPECT_EQ(net::OK, DoomAllEntries());
}
| 2822 | |
// Run BackendDoomAll against each backend configuration.
TEST_F(DiskCacheBackendTest, DoomAll) {
  BackendDoomAll();
}

TEST_F(DiskCacheBackendTest, NewEvictionDoomAll) {
  SetNewEviction();
  BackendDoomAll();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyDoomAll) {
  SetMemoryOnlyMode();
  BackendDoomAll();
}

TEST_F(DiskCacheBackendTest, AppCacheOnlyDoomAll) {
  SetCacheType(net::APP_CACHE);
  BackendDoomAll();
}

TEST_F(DiskCacheBackendTest, ShaderCacheOnlyDoomAll) {
  SetCacheType(net::SHADER_CACHE);
  BackendDoomAll();
}
| 2846 | |
// If the index size changes when we doom the cache, we should not crash.
void DiskCacheBackendTest::BackendDoomAll2() {
  EXPECT_EQ(2, cache_->GetEntryCount());
  EXPECT_EQ(net::OK, DoomAllEntries());

  // The cache must remain usable after the doom.
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry("Something new", &entry));
  entry->Close();

  EXPECT_EQ(1, cache_->GetEntryCount());
}
| 2858 | |
// Run BackendDoomAll2 over the "bad_rankings2" fixture with an explicit max
// size, with and without new eviction.
TEST_F(DiskCacheBackendTest, DoomAll2) {
  ASSERT_TRUE(CopyTestCache("bad_rankings2"));
  DisableFirstCleanup();
  SetMaxSize(20 * 1024 * 1024);
  InitCache();
  BackendDoomAll2();
}

TEST_F(DiskCacheBackendTest, NewEvictionDoomAll2) {
  ASSERT_TRUE(CopyTestCache("bad_rankings2"));
  DisableFirstCleanup();
  SetMaxSize(20 * 1024 * 1024);
  SetNewEviction();
  InitCache();
  BackendDoomAll2();
}
| 2875 | |
// We should be able to create the same entry on multiple simultaneous instances
// of the cache.
TEST_F(DiskCacheTest, MultipleInstances) {
  // Each backend gets its own directory; simultaneity here means two live
  // backends in one process, not two backends sharing files.
  base::ScopedTempDir store1, store2;
  ASSERT_TRUE(store1.CreateUniqueTempDir());
  ASSERT_TRUE(store2.CreateUniqueTempDir());

  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
  net::TestCompletionCallback cb;

  const int kNumberOfCaches = 2;
  scoped_ptr<disk_cache::Backend> cache[kNumberOfCaches];

  int rv =
      disk_cache::CreateCacheBackend(net::DISK_CACHE,
                                     net::CACHE_BACKEND_DEFAULT,
                                     store1.path(),
                                     0,
                                     false,
                                     cache_thread.message_loop_proxy().get(),
                                     NULL,
                                     &cache[0],
                                     cb.callback());
  ASSERT_EQ(net::OK, cb.GetResult(rv));
  rv = disk_cache::CreateCacheBackend(net::MEDIA_CACHE,
                                      net::CACHE_BACKEND_DEFAULT,
                                      store2.path(),
                                      0,
                                      false,
                                      cache_thread.message_loop_proxy().get(),
                                      NULL,
                                      &cache[1],
                                      cb.callback());
  ASSERT_EQ(net::OK, cb.GetResult(rv));

  ASSERT_TRUE(cache[0].get() != NULL && cache[1].get() != NULL);

  // The same key must be creatable on both backends.
  std::string key("the first key");
  disk_cache::Entry* entry;
  for (int i = 0; i < kNumberOfCaches; i++) {
    rv = cache[i]->CreateEntry(key, &entry, cb.callback());
    ASSERT_EQ(net::OK, cb.GetResult(rv));
    entry->Close();
  }
}
| 2923 | |
// Test the six regions of the curve that determines the max cache size.
// PreferredCacheSize() maps available disk space to a cache size; the
// boundary values below probe each segment of that piecewise curve.
TEST_F(DiskCacheTest, AutomaticMaxSize) {
  using disk_cache::kDefaultCacheSize;
  int64 large_size = kDefaultCacheSize;

  // Region 1: expected = available * 0.8
  EXPECT_EQ((kDefaultCacheSize - 1) * 8 / 10,
            disk_cache::PreferredCacheSize(large_size - 1));
  EXPECT_EQ(kDefaultCacheSize * 8 / 10,
            disk_cache::PreferredCacheSize(large_size));
  EXPECT_EQ(kDefaultCacheSize - 1,
            disk_cache::PreferredCacheSize(large_size * 10 / 8 - 1));

  // Region 2: expected = default_size
  EXPECT_EQ(kDefaultCacheSize,
            disk_cache::PreferredCacheSize(large_size * 10 / 8));
  EXPECT_EQ(kDefaultCacheSize,
            disk_cache::PreferredCacheSize(large_size * 10 - 1));

  // Region 3: expected = available * 0.1
  EXPECT_EQ(kDefaultCacheSize,
            disk_cache::PreferredCacheSize(large_size * 10));
  EXPECT_EQ((kDefaultCacheSize * 25 - 1) / 10,
            disk_cache::PreferredCacheSize(large_size * 25 - 1));

  // Region 4: expected = default_size * 2.5
  EXPECT_EQ(kDefaultCacheSize * 25 / 10,
            disk_cache::PreferredCacheSize(large_size * 25));
  EXPECT_EQ(kDefaultCacheSize * 25 / 10,
            disk_cache::PreferredCacheSize(large_size * 100 - 1));
  EXPECT_EQ(kDefaultCacheSize * 25 / 10,
            disk_cache::PreferredCacheSize(large_size * 100));
  EXPECT_EQ(kDefaultCacheSize * 25 / 10,
            disk_cache::PreferredCacheSize(large_size * 250 - 1));

  // Region 5: expected = available * 0.1
  int64 largest_size = kDefaultCacheSize * 4;
  EXPECT_EQ(kDefaultCacheSize * 25 / 10,
            disk_cache::PreferredCacheSize(large_size * 250));
  EXPECT_EQ(largest_size - 1,
            disk_cache::PreferredCacheSize(largest_size * 100 - 1));

  // Region 6: expected = largest possible size
  EXPECT_EQ(largest_size,
            disk_cache::PreferredCacheSize(largest_size * 100));
  EXPECT_EQ(largest_size,
            disk_cache::PreferredCacheSize(largest_size * 10000));
}
| 2972 | |
// Tests that we can "migrate" a running instance from one experiment group to
// another.
TEST_F(DiskCacheBackendTest, Histograms) {
  InitCache();
  disk_cache::BackendImpl* backend_ = cache_impl_;  // Needed by the CACHE_UMA macro.

  // Report the same sample under consecutive experiment group ids.
  for (int i = 1; i < 3; i++) {
    CACHE_UMA(HOURS, "FillupTime", i, 28);
  }
}
| 2983 | |
// Make sure that we keep the total memory used by the internal buffers under
// control.
TEST_F(DiskCacheBackendTest, TotalBuffersSize1) {
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 200;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer->data(), kSize, true);

  // Repeatedly grow and shrink the entry's in-memory buffers; the backend's
  // buffer accounting must come back to zero at the end.
  for (int i = 0; i < 10; i++) {
    SCOPED_TRACE(i);
    // Allocate 2MB for this entry.
    EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, true));
    EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer.get(), kSize, true));
    EXPECT_EQ(kSize,
              WriteData(entry, 0, 1024 * 1024, buffer.get(), kSize, false));
    EXPECT_EQ(kSize,
              WriteData(entry, 1, 1024 * 1024, buffer.get(), kSize, false));

    // Delete one of the buffers and truncate the other.
    EXPECT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, true));
    EXPECT_EQ(0, WriteData(entry, 1, 10, buffer.get(), 0, true));

    // Delete the second buffer, writing 10 bytes to disk.
    entry->Close();
    ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  }

  entry->Close();
  EXPECT_EQ(0, cache_impl_->GetTotalBuffersSize());
}
| 3018 | |
// This test assumes at least 150MB of system memory.
// Drives the backend's buffer accounting directly through IsAllocAllowed()
// and BufferDeleted(), and checks the size cap.
TEST_F(DiskCacheBackendTest, TotalBuffersSize2) {
  InitCache();

  const int kOneMB = 1024 * 1024;
  // Each granted allocation is added to the running total.
  EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
  EXPECT_EQ(kOneMB, cache_impl_->GetTotalBuffersSize());

  EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
  EXPECT_EQ(kOneMB * 2, cache_impl_->GetTotalBuffersSize());

  EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
  EXPECT_EQ(kOneMB * 3, cache_impl_->GetTotalBuffersSize());

  // Releasing a buffer is subtracted from the total.
  cache_impl_->BufferDeleted(kOneMB);
  EXPECT_EQ(kOneMB * 2, cache_impl_->GetTotalBuffersSize());

  // Check the upper limit.
  EXPECT_FALSE(cache_impl_->IsAllocAllowed(0, 30 * kOneMB));

  for (int i = 0; i < 30; i++)
    cache_impl_->IsAllocAllowed(0, kOneMB);  // Ignore the result.

  // After (up to) 30 more MB has been granted, further requests are denied.
  EXPECT_FALSE(cache_impl_->IsAllocAllowed(0, kOneMB));
}
| 3044 | |
// Tests that sharing of external files works and we are able to delete the
// files when we need to.
TEST_F(DiskCacheBackendTest, FileSharing) {
  InitCache();

  disk_cache::Addr address(0x80000001);
  ASSERT_TRUE(cache_impl_->CreateExternalFile(&address));
  base::FilePath name = cache_impl_->GetFileName(address);

  scoped_refptr<disk_cache::File> file(new disk_cache::File(false));
  file->Init(name);

#if defined(OS_WIN)
  // On Windows, the file can only be reopened while the cache holds it if
  // FILE_SHARE_DELETE is requested as well.
  DWORD sharing = FILE_SHARE_READ | FILE_SHARE_WRITE;
  DWORD access = GENERIC_READ | GENERIC_WRITE;
  base::win::ScopedHandle file2(CreateFile(
      name.value().c_str(), access, sharing, NULL, OPEN_EXISTING, 0, NULL));
  EXPECT_FALSE(file2.IsValid());

  sharing |= FILE_SHARE_DELETE;
  file2.Set(CreateFile(name.value().c_str(), access, sharing, NULL,
                       OPEN_EXISTING, 0, NULL));
  EXPECT_TRUE(file2.IsValid());
#endif

  // Delete the directory entry while the file is still open.
  EXPECT_TRUE(base::DeleteFile(name, false));

  // We should be able to use the file.
  const int kSize = 200;
  char buffer1[kSize];
  char buffer2[kSize];
  memset(buffer1, 't', kSize);
  memset(buffer2, 0, kSize);
  EXPECT_TRUE(file->Write(buffer1, kSize, 0));
  EXPECT_TRUE(file->Read(buffer2, kSize, 0));
  EXPECT_EQ(0, memcmp(buffer1, buffer2, kSize));

  EXPECT_TRUE(disk_cache::DeleteCacheFile(name));
}
| 3084 | |
// An external cache hit should refresh an entry's rank so it survives
// eviction ahead of entries that were created later but never touched.
TEST_F(DiskCacheBackendTest, UpdateRankForExternalCacheHit) {
  InitCache();

  disk_cache::Entry* entry;

  for (int i = 0; i < 2; ++i) {
    std::string key = base::StringPrintf("key%d", i);
    ASSERT_EQ(net::OK, CreateEntry(key, &entry));
    entry->Close();
  }

  // Ping the oldest entry.
  cache_->OnExternalCacheHit("key0");

  TrimForTest(false);

  // Make sure the older key remains.
  EXPECT_EQ(1, cache_->GetEntryCount());
  ASSERT_EQ(net::OK, OpenEntry("key0", &entry));
  entry->Close();
}
| 3106 | |
// Same external-hit scenario as above, on the shader cache type.
TEST_F(DiskCacheBackendTest, ShaderCacheUpdateRankForExternalCacheHit) {
  SetCacheType(net::SHADER_CACHE);
  InitCache();

  disk_cache::Entry* entry;

  for (int i = 0; i < 2; ++i) {
    std::string key = base::StringPrintf("key%d", i);
    ASSERT_EQ(net::OK, CreateEntry(key, &entry));
    entry->Close();
  }

  // Ping the oldest entry.
  cache_->OnExternalCacheHit("key0");

  TrimForTest(false);

  // Make sure the older key remains.
  EXPECT_EQ(1, cache_->GetEntryCount());
  ASSERT_EQ(net::OK, OpenEntry("key0", &entry));
  entry->Close();
}
| 3129 | |
| 3130 void DiskCacheBackendTest::TracingBackendBasics() { | |
| 3131 InitCache(); | |
| 3132 cache_.reset(new disk_cache::TracingCacheBackend(cache_.Pass())); | |
| 3133 cache_impl_ = NULL; | |
| 3134 EXPECT_EQ(net::DISK_CACHE, cache_->GetCacheType()); | |
| 3135 if (!simple_cache_mode_) { | |
| 3136 EXPECT_EQ(0, cache_->GetEntryCount()); | |
| 3137 } | |
| 3138 | |
| 3139 net::TestCompletionCallback cb; | |
| 3140 disk_cache::Entry* entry = NULL; | |
| 3141 EXPECT_NE(net::OK, OpenEntry("key", &entry)); | |
| 3142 EXPECT_TRUE(NULL == entry); | |
| 3143 | |
| 3144 ASSERT_EQ(net::OK, CreateEntry("key", &entry)); | |
| 3145 EXPECT_TRUE(NULL != entry); | |
| 3146 | |
| 3147 disk_cache::Entry* same_entry = NULL; | |
| 3148 ASSERT_EQ(net::OK, OpenEntry("key", &same_entry)); | |
| 3149 EXPECT_TRUE(NULL != same_entry); | |
| 3150 | |
| 3151 if (!simple_cache_mode_) { | |
| 3152 EXPECT_EQ(1, cache_->GetEntryCount()); | |
| 3153 } | |
| 3154 entry->Close(); | |
| 3155 entry = NULL; | |
| 3156 same_entry->Close(); | |
| 3157 same_entry = NULL; | |
| 3158 } | |
| 3159 | |
// Tracing-wrapper basics on the default (blockfile) backend.
TEST_F(DiskCacheBackendTest, TracingBackendBasics) {
  TracingBackendBasics();
}
| 3163 | |
| 3164 // The Simple Cache backend requires a few guarantees from the filesystem like | |
| 3165 // atomic renaming of recently open files. Those guarantees are not provided in | |
| 3166 // general on Windows. | |
| 3167 #if defined(OS_POSIX) | |
| 3168 | |
// Simple Cache variants of the generic backend scenarios defined earlier in
// this file.
TEST_F(DiskCacheBackendTest, SimpleCacheShutdownWithPendingCreate) {
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  BackendShutdownWithPendingCreate(false);
}

TEST_F(DiskCacheBackendTest, SimpleCacheShutdownWithPendingFileIO) {
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  BackendShutdownWithPendingFileIO(false);
}

TEST_F(DiskCacheBackendTest, SimpleCacheBasics) {
  SetSimpleCacheMode();
  BackendBasics();
}

TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheBasics) {
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  BackendBasics();
}

TEST_F(DiskCacheBackendTest, SimpleCacheKeying) {
  SetSimpleCacheMode();
  BackendKeying();
}

TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheKeying) {
  SetSimpleCacheMode();
  SetCacheType(net::APP_CACHE);
  BackendKeying();
}

TEST_F(DiskCacheBackendTest, DISABLED_SimpleCacheSetSize) {
  SetSimpleCacheMode();
  BackendSetSize();
}
| 3207 | |
| 3208 // MacOS has a default open file limit of 256 files, which is incompatible with | |
| 3209 // this simple cache test. | |
| 3210 #if defined(OS_MACOSX) | |
| 3211 #define SIMPLE_MAYBE_MACOS(TestName) DISABLED_ ## TestName | |
| 3212 #else | |
| 3213 #define SIMPLE_MAYBE_MACOS(TestName) TestName | |
| 3214 #endif | |
| 3215 | |
// More Simple Cache variants of the generic backend scenarios; the load
// tests are disabled on MacOS via SIMPLE_MAYBE_MACOS (open-file limit).
TEST_F(DiskCacheBackendTest, SIMPLE_MAYBE_MACOS(SimpleCacheLoad)) {
  SetMaxSize(0x100000);
  SetSimpleCacheMode();
  BackendLoad();
}

TEST_F(DiskCacheBackendTest, SIMPLE_MAYBE_MACOS(SimpleCacheAppCacheLoad)) {
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  SetMaxSize(0x100000);
  BackendLoad();
}

TEST_F(DiskCacheBackendTest, SimpleDoomRecent) {
  SetSimpleCacheMode();
  BackendDoomRecent();
}

TEST_F(DiskCacheBackendTest, SimpleDoomBetween) {
  SetSimpleCacheMode();
  BackendDoomBetween();
}

TEST_F(DiskCacheBackendTest, SimpleCacheDoomAll) {
  SetSimpleCacheMode();
  BackendDoomAll();
}

TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheOnlyDoomAll) {
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  BackendDoomAll();
}

TEST_F(DiskCacheBackendTest, SimpleCacheTracingBackendBasics) {
  SetSimpleCacheMode();
  TracingBackendBasics();
  // TODO(pasko): implement integrity checking on the Simple Backend.
  DisableIntegrityCheck();
}
| 3256 | |
// If one of an entry's on-disk files is missing, opening the entry should
// fail and clean up the entry's remaining files.
TEST_F(DiskCacheBackendTest, SimpleCacheOpenMissingFile) {
  SetSimpleCacheMode();
  InitCache();

  const char* key = "the first key";
  disk_cache::Entry* entry = NULL;

  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  ASSERT_TRUE(entry != NULL);
  entry->Close();
  entry = NULL;

  // To make sure the file creation completed we need to call open again so that
  // we block until it actually created the files.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  ASSERT_TRUE(entry != NULL);
  entry->Close();
  entry = NULL;

  // Delete one of the files in the entry.
  base::FilePath to_delete_file = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
  EXPECT_TRUE(base::PathExists(to_delete_file));
  EXPECT_TRUE(disk_cache::DeleteCacheFile(to_delete_file));

  // Failing to open the entry should delete the rest of these files.
  ASSERT_EQ(net::ERR_FAILED, OpenEntry(key, &entry));

  // Confirm the rest of the files are gone.
  for (int i = 1; i < disk_cache::kSimpleEntryFileCount; ++i) {
    base::FilePath should_be_gone_file(cache_path_.AppendASCII(
        disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, i)));
    EXPECT_FALSE(base::PathExists(should_be_gone_file));
  }
}
| 3292 | |
// If an entry's on-disk file has a corrupt header, opening the entry should
// fail.
TEST_F(DiskCacheBackendTest, SimpleCacheOpenBadFile) {
  SetSimpleCacheMode();
  InitCache();

  const char* key = "the first key";
  disk_cache::Entry* entry = NULL;

  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  disk_cache::Entry* null = NULL;
  ASSERT_NE(null, entry);
  entry->Close();
  entry = NULL;

  // To make sure the file creation completed we need to call open again so that
  // we block until it actually created the files.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  ASSERT_NE(null, entry);
  entry->Close();
  entry = NULL;

  // Write an invalid header for stream 0 and stream 1.
  base::FilePath entry_file1_path = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));

  // A bogus magic number makes the header invalid.
  disk_cache::SimpleFileHeader header;
  header.initial_magic_number = GG_UINT64_C(0xbadf00d);
  EXPECT_EQ(
      implicit_cast<int>(sizeof(header)),
      file_util::WriteFile(entry_file1_path, reinterpret_cast<char*>(&header),
                           sizeof(header)));
  ASSERT_EQ(net::ERR_FAILED, OpenEntry(key, &entry));
}
| 3325 | |
| 3326 // Tests that the Simple Cache Backend fails to initialize with non-matching | |
| 3327 // file structure on disk. | |
| 3328 TEST_F(DiskCacheBackendTest, SimpleCacheOverBlockfileCache) { | |
| 3329 // Create a cache structure with the |BackendImpl|. | |
| 3330 InitCache(); | |
| 3331 disk_cache::Entry* entry; | |
| 3332 const int kSize = 50; | |
| 3333 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize)); | |
| 3334 CacheTestFillBuffer(buffer->data(), kSize, false); | |
| 3335 ASSERT_EQ(net::OK, CreateEntry("key", &entry)); | |
| 3336 ASSERT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, false)); | |
| 3337 entry->Close(); | |
| 3338 cache_.reset(); | |
| 3339 | |
| 3340 // Check that the |SimpleBackendImpl| does not favor this structure. | |
| 3341 base::Thread cache_thread("CacheThread"); | |
| 3342 ASSERT_TRUE(cache_thread.StartWithOptions( | |
| 3343 base::Thread::Options(base::MessageLoop::TYPE_IO, 0))); | |
| 3344 disk_cache::SimpleBackendImpl* simple_cache = | |
| 3345 new disk_cache::SimpleBackendImpl(cache_path_, | |
| 3346 0, | |
| 3347 net::DISK_CACHE, | |
| 3348 cache_thread.message_loop_proxy().get(), | |
| 3349 NULL); | |
| 3350 net::TestCompletionCallback cb; | |
| 3351 int rv = simple_cache->Init(cb.callback()); | |
| 3352 EXPECT_NE(net::OK, cb.GetResult(rv)); | |
| 3353 delete simple_cache; | |
| 3354 DisableIntegrityCheck(); | |
| 3355 } | |
| 3356 | |
| 3357 // Tests that the |BackendImpl| refuses to initialize on top of the files | |
| 3358 // generated by the Simple Cache Backend. | |
| 3359 TEST_F(DiskCacheBackendTest, BlockfileCacheOverSimpleCache) { | |
| 3360 // Create a cache structure with the |SimpleBackendImpl|. | |
| 3361 SetSimpleCacheMode(); | |
| 3362 InitCache(); | |
| 3363 disk_cache::Entry* entry; | |
| 3364 const int kSize = 50; | |
| 3365 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize)); | |
| 3366 CacheTestFillBuffer(buffer->data(), kSize, false); | |
| 3367 ASSERT_EQ(net::OK, CreateEntry("key", &entry)); | |
| 3368 ASSERT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, false)); | |
| 3369 entry->Close(); | |
| 3370 cache_.reset(); | |
| 3371 | |
| 3372 // Check that the |BackendImpl| does not favor this structure. | |
| 3373 base::Thread cache_thread("CacheThread"); | |
| 3374 ASSERT_TRUE(cache_thread.StartWithOptions( | |
| 3375 base::Thread::Options(base::MessageLoop::TYPE_IO, 0))); | |
| 3376 disk_cache::BackendImpl* cache = new disk_cache::BackendImpl( | |
| 3377 cache_path_, base::MessageLoopProxy::current().get(), NULL); | |
| 3378 cache->SetUnitTestMode(); | |
| 3379 net::TestCompletionCallback cb; | |
| 3380 int rv = cache->Init(cb.callback()); | |
| 3381 EXPECT_NE(net::OK, cb.GetResult(rv)); | |
| 3382 delete cache; | |
| 3383 DisableIntegrityCheck(); | |
| 3384 } | |
| 3385 | |
// Runs the fixture's shared BackendFixEnumerators() test body with the cache
// configured in Simple Cache mode. (The helper is defined elsewhere in this
// file/fixture — presumably also exercised against other backends; confirm.)
TEST_F(DiskCacheBackendTest, SimpleCacheFixEnumerators) {
  SetSimpleCacheMode();
  BackendFixEnumerators();
}
| 3390 | |
| 3391 // Tests basic functionality of the SimpleBackend implementation of the | |
| 3392 // enumeration API. | |
| 3393 TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationBasics) { | |
| 3394 SetSimpleCacheMode(); | |
| 3395 InitCache(); | |
| 3396 std::set<std::string> key_pool; | |
| 3397 ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool)); | |
| 3398 | |
| 3399 // Check that enumeration returns all entries. | |
| 3400 std::set<std::string> keys_to_match(key_pool); | |
| 3401 void* iter = NULL; | |
| 3402 size_t count = 0; | |
| 3403 ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter, &keys_to_match, &count)); | |
| 3404 cache_->EndEnumeration(&iter); | |
| 3405 EXPECT_EQ(key_pool.size(), count); | |
| 3406 EXPECT_TRUE(keys_to_match.empty()); | |
| 3407 | |
| 3408 // Check that opening entries does not affect enumeration. | |
| 3409 keys_to_match = key_pool; | |
| 3410 iter = NULL; | |
| 3411 count = 0; | |
| 3412 disk_cache::Entry* entry_opened_before; | |
| 3413 ASSERT_EQ(net::OK, OpenEntry(*(key_pool.begin()), &entry_opened_before)); | |
| 3414 ASSERT_TRUE(EnumerateAndMatchKeys(key_pool.size()/2, | |
| 3415 &iter, | |
| 3416 &keys_to_match, | |
| 3417 &count)); | |
| 3418 | |
| 3419 disk_cache::Entry* entry_opened_middle; | |
| 3420 ASSERT_EQ(net::OK, | |
| 3421 OpenEntry(*(keys_to_match.begin()), &entry_opened_middle)); | |
| 3422 ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter, &keys_to_match, &count)); | |
| 3423 cache_->EndEnumeration(&iter); | |
| 3424 entry_opened_before->Close(); | |
| 3425 entry_opened_middle->Close(); | |
| 3426 | |
| 3427 EXPECT_EQ(key_pool.size(), count); | |
| 3428 EXPECT_TRUE(keys_to_match.empty()); | |
| 3429 } | |
| 3430 | |
| 3431 // Tests that the enumerations are not affected by dooming an entry in the | |
| 3432 // middle. | |
| 3433 TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationWhileDoomed) { | |
| 3434 SetSimpleCacheMode(); | |
| 3435 InitCache(); | |
| 3436 std::set<std::string> key_pool; | |
| 3437 ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool)); | |
| 3438 | |
| 3439 // Check that enumeration returns all entries but the doomed one. | |
| 3440 std::set<std::string> keys_to_match(key_pool); | |
| 3441 void* iter = NULL; | |
| 3442 size_t count = 0; | |
| 3443 ASSERT_TRUE(EnumerateAndMatchKeys(key_pool.size()/2, | |
| 3444 &iter, | |
| 3445 &keys_to_match, | |
| 3446 &count)); | |
| 3447 | |
| 3448 std::string key_to_delete = *(keys_to_match.begin()); | |
| 3449 DoomEntry(key_to_delete); | |
| 3450 keys_to_match.erase(key_to_delete); | |
| 3451 key_pool.erase(key_to_delete); | |
| 3452 ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter, &keys_to_match, &count)); | |
| 3453 cache_->EndEnumeration(&iter); | |
| 3454 | |
| 3455 EXPECT_EQ(key_pool.size(), count); | |
| 3456 EXPECT_TRUE(keys_to_match.empty()); | |
| 3457 } | |
| 3458 | |
// Tests that enumerations are not affected by corrupt files.
TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationCorruption) {
  SetSimpleCacheMode();
  InitCache();
  std::set<std::string> key_pool;
  ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));

  // Create a corrupt entry. The write/read sequence ensures that the entry will
  // have been created before corrupting the platform files, in the case of
  // optimistic operations.
  const std::string key = "the key";
  disk_cache::Entry* corrupted_entry;

  ASSERT_EQ(net::OK, CreateEntry(key, &corrupted_entry));
  ASSERT_TRUE(corrupted_entry);
  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer->data(), kSize, false);
  // The read after the write blocks until the preceding (possibly optimistic)
  // disk operations have completed, so the files exist before being clobbered.
  ASSERT_EQ(kSize,
            WriteData(corrupted_entry, 0, 0, buffer.get(), kSize, false));
  ASSERT_EQ(kSize, ReadData(corrupted_entry, 0, 0, buffer.get(), kSize));
  corrupted_entry->Close();

  // Overwrite the entry's on-disk file with garbage. GetEntryCount() still
  // includes the corrupt entry — apparently the count does not validate the
  // files on disk (NOTE(review): confirm against SimpleBackendImpl).
  EXPECT_TRUE(disk_cache::simple_util::CreateCorruptFileForTests(
      key, cache_path_));
  EXPECT_EQ(key_pool.size() + 1,
            implicit_cast<size_t>(cache_->GetEntryCount()));

  // Check that enumeration returns all entries but the corrupt one.
  std::set<std::string> keys_to_match(key_pool);
  void* iter = NULL;
  size_t count = 0;
  ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter, &keys_to_match, &count));
  cache_->EndEnumeration(&iter);

  EXPECT_EQ(key_pool.size(), count);
  EXPECT_TRUE(keys_to_match.empty());
}
| 3497 | |
| 3498 #endif // defined(OS_POSIX) | |
| OLD | NEW |