OLD | NEW |
(Empty) | |
| 1 // Copyright (c) 2014 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. |
| 4 |
| 5 #include "base/file_util.h" |
| 6 #include "base/single_thread_task_runner.h" |
| 7 #include "base/strings/string_util.h" |
| 8 #include "base/strings/stringprintf.h" |
| 9 #include "base/threading/thread_restrictions.h" |
| 10 #include "base/memory/scoped_ptr.h" |
| 11 #include "net/base/test_completion_callback.h" |
| 12 #include "net/disk_cache/cache_util.h" |
| 13 #include "net/disk_cache/disk_cache_test.h" |
| 14 #include "net/disk_cache/disk_cache_test_util.h" |
| 15 #include "net/disk_cache/entry_tests.h" |
| 16 #include "net/disk_cache/backend_tests.h" |
| 17 #include "net/disk_cache/entry_sync_tests.h" |
| 18 #include "net/disk_cache/blockfile/backend_impl.h" |
| 19 #include "net/disk_cache/blockfile/entry_impl.h" |
| 20 |
| 21 #define CACHE_HISTOGRAM_MACROS_BACKEND_IMPL_OBJ backend_ |
| 22 #include "net/disk_cache/blockfile/histogram_macros.h" |
| 23 |
| 24 namespace disk_cache { |
| 25 |
| 26 namespace { |
| 27 |
// Extra data handed to BackendTestTraits::CreateBackend() for blockfile
// caches: carries an optional hash-table mask used to size the index.
class BlockfileCacheCreateBackendExtraData
    : public BackendTestTraits::CreateBackendExtraData {
 public:
  // A zero mask means "use the backend's default table size".
  uint32 mask() const { return mask_; }
  void set_mask(uint32 mask) { mask_ = mask; }

 private:
  // Only the backend test fixture may construct this object.
  friend class DiskCacheBlockfileBackendTest;

  BlockfileCacheCreateBackendExtraData() : mask_(0) {}

  uint32 mask_;
};
| 41 |
| 42 class BlockfileCacheBackendTraits : public BackendTestTraits { |
| 43 public: |
| 44 virtual ~BlockfileCacheBackendTraits() {} |
| 45 |
| 46 virtual Backend* CreateBackend( |
| 47 const CreateBackendExtraData* extra_data, |
| 48 const base::FilePath& cache_path, |
| 49 int max_size, |
| 50 base::MessageLoopProxy* task_runner) const OVERRIDE { |
| 51 const BlockfileCacheCreateBackendExtraData* blockfile_extra_data = |
| 52 static_cast<const BlockfileCacheCreateBackendExtraData*>(extra_data); |
| 53 |
| 54 scoped_ptr<BackendImpl> blockfile_backend; |
| 55 if (blockfile_extra_data && blockfile_extra_data->mask()) { |
| 56 blockfile_backend.reset( |
| 57 new BackendImpl(cache_path, blockfile_extra_data->mask(), |
| 58 task_runner, NULL)); |
| 59 } else { |
| 60 blockfile_backend.reset(new BackendImpl(cache_path, task_runner, NULL)); |
| 61 } |
| 62 if (!blockfile_backend) |
| 63 return NULL; |
| 64 |
| 65 blockfile_backend->SetMaxSize(max_size); |
| 66 if (flags_ & kNewEviction) |
| 67 blockfile_backend->SetNewEviction(); |
| 68 blockfile_backend->SetFlags(flags_); |
| 69 blockfile_backend->SetType(type_); |
| 70 |
| 71 net::TestCompletionCallback cb; |
| 72 int result = blockfile_backend->Init(cb.callback()); |
| 73 if (cb.GetResult(result) != net::OK) { |
| 74 LOG(INFO) << "init failed :-("; |
| 75 return NULL; |
| 76 } |
| 77 return blockfile_backend.release(); |
| 78 } |
| 79 |
| 80 virtual bool UsesCacheThread() const OVERRIDE { return true; } |
| 81 virtual bool WritesUpdateLastUsed() const OVERRIDE { |
| 82 return type_ != net::APP_CACHE; |
| 83 } |
| 84 virtual bool ReadsUpdateLastUsed() const OVERRIDE { |
| 85 return type_ != net::APP_CACHE && type_ != net::SHADER_CACHE; |
| 86 } |
| 87 virtual int SparseRoundingInterval() const OVERRIDE { return 1024; } |
| 88 virtual bool EntryCountIncludesSparseRanges() const OVERRIDE { |
| 89 return true; |
| 90 } |
| 91 virtual bool EnumerationsAreLexicographicByKey() const OVERRIDE { |
| 92 return true; |
| 93 } |
| 94 |
| 95 virtual bool SetMaxSize(Backend* backend, int size) const OVERRIDE { |
| 96 return static_cast<BackendImpl*>(backend)->SetMaxSize(size); |
| 97 } |
| 98 |
| 99 virtual void FlushQueueForTest(Backend* backend) const OVERRIDE { |
| 100 BackendImpl* blockfile_backend = static_cast<BackendImpl*>(backend); |
| 101 |
| 102 net::TestCompletionCallback cb; |
| 103 int rv = blockfile_backend->FlushQueueForTest(cb.callback()); |
| 104 EXPECT_EQ(net::OK, cb.GetResult(rv)); |
| 105 } |
| 106 |
| 107 net::CacheType type() const { return type_; } |
| 108 uint32 flags() const { return flags_; } |
| 109 |
| 110 // DiskCache(), AppCache() and ShaderCache() return traits objects for the |
| 111 // entry unit tests. |
| 112 static const BackendTestTraits* DiskCache() { |
| 113 static const BlockfileCacheBackendTraits traits(net::DISK_CACHE, kNoRandom); |
| 114 return &traits; |
| 115 } |
| 116 |
| 117 static const BackendTestTraits* AppCache() { |
| 118 static const BlockfileCacheBackendTraits traits(net::APP_CACHE, kNoRandom); |
| 119 return &traits; |
| 120 } |
| 121 |
| 122 static const BackendTestTraits* ShaderCache() { |
| 123 static const BlockfileCacheBackendTraits traits(net::SHADER_CACHE, |
| 124 kNoRandom); |
| 125 return &traits; |
| 126 } |
| 127 |
| 128 // OldEviction() and NewEviction() return traits objects for the backend |
| 129 // unit tests. |
| 130 static const BackendTestTraits* OldEviction() { |
| 131 static const BlockfileCacheBackendTraits traits(net::DISK_CACHE, kNoRandom); |
| 132 return &traits; |
| 133 } |
| 134 |
| 135 static const BackendTestTraits* NewEviction() { |
| 136 static const BlockfileCacheBackendTraits traits(net::DISK_CACHE, |
| 137 kNoRandom | kNewEviction); |
| 138 return &traits; |
| 139 } |
| 140 |
| 141 private: |
| 142 BlockfileCacheBackendTraits(net::CacheType type, |
| 143 uint32 flags) : type_(type), |
| 144 flags_(flags) {} |
| 145 |
| 146 const net::CacheType type_; |
| 147 const uint32 flags_; |
| 148 }; |
| 149 |
// Run the generic entry tests on DISK_CACHE, APP_CACHE and SHADER_CACHE type
// caches backed by the blockfile implementation.
INSTANTIATE_TEST_CASE_P(
    BlockfileCache, DiskCacheEntryTest,
    ::testing::Values(BlockfileCacheBackendTraits::DiskCache(),
                      BlockfileCacheBackendTraits::AppCache(),
                      BlockfileCacheBackendTraits::ShaderCache()));

// The synchronous entry tests cover the same three cache flavors.
INSTANTIATE_TEST_CASE_P(
    BlockfileCache, DiskCacheEntrySyncTest,
    ::testing::Values(BlockfileCacheBackendTraits::DiskCache(),
                      BlockfileCacheBackendTraits::AppCache(),
                      BlockfileCacheBackendTraits::ShaderCache()));
| 162 |
// Fixture for blockfile-specific entry tests; exposes the backend under test
// downcast to its concrete BackendImpl type.
class DiskCacheBlockfileEntryTest : public DiskCacheTest {
 protected:
  BackendImpl* cache_impl() {
    return static_cast<BackendImpl*>(DiskCacheTest::cache());
  }
};
| 169 |
// Blockfile-specific entry tests also run against all three cache flavors.
INSTANTIATE_TEST_CASE_P(
    BlockfileCache, DiskCacheBlockfileEntryTest,
    ::testing::Values(BlockfileCacheBackendTraits::DiskCache(),
                      BlockfileCacheBackendTraits::AppCache(),
                      BlockfileCacheBackendTraits::ShaderCache()));

// Run backend tests using both the old and the new eviction algorithm.
INSTANTIATE_TEST_CASE_P(
    BlockfileCache, DiskCacheBackendTest,
    ::testing::Values(BlockfileCacheBackendTraits::OldEviction(),
                      BlockfileCacheBackendTraits::NewEviction()));
| 182 |
// Fixture for blockfile-specific backend tests. Provides typed access to the
// BackendImpl under test, a crash simulator, and helpers that poke backend
// internals (trimming, index-mask selection).
class DiskCacheBlockfileBackendTest : public DiskCacheTest {
 public:
  // Returns the backend downcast to its concrete blockfile type.
  BackendImpl* cache_impl() {
    return static_cast<BackendImpl*>(DiskCacheTest::cache());
  }

  const BlockfileCacheBackendTraits* blockfile_traits() const {
    return static_cast<const BlockfileCacheBackendTraits*>(
        DiskCacheTest::traits());
  }

  // Initializes the cache, forwarding any mask previously set via SetMask().
  void InitCache() {
    InitCacheWithExtraData(&extra_data_);
  }

  // Flushes pending work, clears in-memory ref counts, and re-initializes the
  // cache, approximating a crash between operations.
  void SimulateCrash() {
    net::TestCompletionCallback cb;
    int rv = cache_impl()->FlushQueueForTest(cb.callback());
    ASSERT_EQ(net::OK, cb.GetResult(rv));
    cache_impl()->ClearRefCountForTest();

    // NOTE(review): the integrity check the upstream test ran here is
    // currently disabled; re-enable once the fixture exposes the cache
    // object for reset.
    // cache_.reset();
    // EXPECT_TRUE(CheckCacheIntegrity(cache_path_, new_eviction_, mask_));

    InitCache();
  }

  void TrimForTest(bool empty) {
    RunTaskForTest(base::Bind(&disk_cache::BackendImpl::TrimForTest,
                              base::Unretained(cache_impl()),
                              empty));
  }

  void TrimDeletedListForTest(bool empty) {
    RunTaskForTest(base::Bind(&disk_cache::BackendImpl::TrimDeletedListForTest,
                              base::Unretained(cache_impl()),
                              empty));
  }

  BlockfileCacheCreateBackendExtraData* extra_data() { return &extra_data_; }

  // Sets the index-table mask applied by the next InitCache() call.
  void SetMask(uint32 mask) {
    extra_data()->set_mask(mask);
  }

 private:
  // Hands |closure| to the backend for execution and waits for completion.
  void RunTaskForTest(const base::Closure& closure) {
    net::TestCompletionCallback cb;
    int rv = cache_impl()->RunTaskForTest(closure, cb.callback());
    EXPECT_EQ(net::OK, cb.GetResult(rv));
  }

  BlockfileCacheCreateBackendExtraData extra_data_;
};
| 237 |
// Blockfile-specific backend tests run under both eviction algorithms.
INSTANTIATE_TEST_CASE_P(
    BlockfileCache, DiskCacheBlockfileBackendTest,
    ::testing::Values(BlockfileCacheBackendTraits::OldEviction(),
                      BlockfileCacheBackendTraits::NewEviction()));
| 242 |
// Tests that we handle the content correctly when buffering, a feature of the
// blockfile cache that permits fast responses to certain reads.
// Throughout, buffer2 is refilled with fresh noise before each read so that a
// passing memcmp proves the read actually returned the stored data.
TEST_P(DiskCacheBlockfileEntryTest, BlockfileBuffering) {
  InitCache();

  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 200;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer1->data(), kSize, true);
  CacheTestFillBuffer(buffer2->data(), kSize, true);

  EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer1.get(), kSize, false));
  entry->Close();

  // Write a little more and read what we wrote before.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(kSize, WriteData(entry, 1, 5000, buffer1.get(), kSize, false));
  EXPECT_EQ(kSize, ReadData(entry, 1, 0, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));

  // Now go to an external file.
  EXPECT_EQ(kSize, WriteData(entry, 1, 18000, buffer1.get(), kSize, false));
  entry->Close();

  // Write something else and verify old data.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(kSize, WriteData(entry, 1, 10000, buffer1.get(), kSize, false));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 5000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 0, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 18000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));

  // Extend the file some more.
  EXPECT_EQ(kSize, WriteData(entry, 1, 23000, buffer1.get(), kSize, false));
  entry->Close();

  // And now make sure that we can deal with data in both places (ram/disk).
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(kSize, WriteData(entry, 1, 17000, buffer1.get(), kSize, false));

  // We should not overwrite the data at 18000 with this.
  EXPECT_EQ(kSize, WriteData(entry, 1, 19000, buffer1.get(), kSize, false));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 18000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 17000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));

  // This write overlaps the tail of the stream, so a read at 23000 only has
  // 100 bytes left, matching the second half of buffer1.
  EXPECT_EQ(kSize, WriteData(entry, 1, 22900, buffer1.get(), kSize, false));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(100, ReadData(entry, 1, 23000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data() + 100, 100));

  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(100, ReadData(entry, 1, 23100, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data() + 100, 100));

  // Extend the file again and read before without closing the entry.
  EXPECT_EQ(kSize, WriteData(entry, 1, 25000, buffer1.get(), kSize, false));
  EXPECT_EQ(kSize, WriteData(entry, 1, 45000, buffer1.get(), kSize, false));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 25000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 45000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));

  entry->Close();
}
| 322 |
// Tests that we discard entries if the data is missing.
TEST_P(DiskCacheBlockfileEntryTest, MissingData) {
  InitCache();

  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  // Write to an external file.
  const int kSize = 20000;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer->data(), kSize, false);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
  entry->Close();
  FlushQueueForTest();

  // Delete the backing file out from under the cache. 0x80000001 is assumed
  // to be the address of the first external file created above -- TODO
  // confirm against the Addr encoding if this test flakes.
  disk_cache::Addr address(0x80000001);
  base::FilePath name = cache_impl()->GetFileName(address);
  EXPECT_TRUE(base::DeleteFile(name, false));

  // Attempt to read the data.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(net::ERR_FILE_NOT_FOUND,
            ReadData(entry, 0, 0, buffer.get(), kSize));
  entry->Close();

  // The entry should be gone.
  ASSERT_NE(net::OK, OpenEntry(key, &entry));
}
| 352 |
// Tests that corrupt sparse children are removed automatically.
// Sparse writes at 1 MB intervals force the parent entry to spawn one child
// entry per range; two of the children are then deliberately corrupted.
TEST_P(DiskCacheBlockfileEntryTest, CleanupSparseEntry) {
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 4 * 1024;
  scoped_refptr<net::IOBuffer> buf1(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf1->data(), kSize, false);

  const int k1Meg = 1024 * 1024;
  EXPECT_EQ(kSize, WriteSparseData(entry, 8192, buf1.get(), kSize));
  EXPECT_EQ(kSize, WriteSparseData(entry, k1Meg + 8192, buf1.get(), kSize));
  EXPECT_EQ(kSize, WriteSparseData(entry, 2 * k1Meg + 8192, buf1.get(), kSize));
  entry->Close();
  // One parent plus three sparse children.
  EXPECT_EQ(4, cache()->GetEntryCount());

  void* iter = NULL;
  int count = 0;
  std::string child_key[2];
  while (OpenNextEntry(&iter, &entry) == net::OK) {
    ASSERT_TRUE(entry != NULL);
    // Writing to an entry will alter the LRU list and invalidate the iterator.
    if (entry->GetKey() != key && count < 2)
      child_key[count++] = entry->GetKey();
    entry->Close();
  }
  for (int i = 0; i < 2; i++) {
    ASSERT_EQ(net::OK, OpenEntry(child_key[i], &entry));
    // Overwrite the header's magic and signature.
    EXPECT_EQ(12, WriteData(entry, 2, 0, buf1.get(), 12, false));
    entry->Close();
  }

  EXPECT_EQ(4, cache()->GetEntryCount());
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));

  // Two children should be gone. One while reading and one while writing.
  EXPECT_EQ(0, ReadSparseData(entry, 2 * k1Meg + 8192, buf1.get(), kSize));
  EXPECT_EQ(kSize, WriteSparseData(entry, k1Meg + 16384, buf1.get(), kSize));
  EXPECT_EQ(0, ReadSparseData(entry, k1Meg + 8192, buf1.get(), kSize));

  // We never touched this one.
  EXPECT_EQ(kSize, ReadSparseData(entry, 8192, buf1.get(), kSize));
  entry->Close();

  // We re-created one of the corrupt children.
  EXPECT_EQ(3, cache()->GetEntryCount());
}
| 403 |
// Tests cancelling an in-flight sparse write and that all queued
// ReadyForSparseIO notifications fire. Timing-dependent: several assertions
// branch on whether the initial write already completed.
TEST_P(DiskCacheBlockfileEntryTest, CancelSparseIO) {
  UseCurrentThread();
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 40 * 1024;
  scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf->data(), kSize, false);

  // This will open and write two "real" entries.
  net::TestCompletionCallback cb1, cb2, cb3, cb4, cb5;
  int rv = entry->WriteSparseData(
      1024 * 1024 - 4096, buf.get(), kSize, cb1.callback());
  EXPECT_EQ(net::ERR_IO_PENDING, rv);

  int64 offset = 0;
  rv = entry->GetAvailableRange(offset, kSize, &offset, cb5.callback());
  rv = cb5.GetResult(rv);
  if (!cb1.have_result()) {
    // We may or may not have finished writing to the entry. If we have not,
    // we cannot start another operation at this time.
    EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED, rv);
  }

  // We cancel the pending operation, and register multiple notifications.
  entry->CancelSparseIO();
  EXPECT_EQ(net::ERR_IO_PENDING, entry->ReadyForSparseIO(cb2.callback()));
  EXPECT_EQ(net::ERR_IO_PENDING, entry->ReadyForSparseIO(cb3.callback()));
  entry->CancelSparseIO();  // Should be a no op at this point.
  EXPECT_EQ(net::ERR_IO_PENDING, entry->ReadyForSparseIO(cb4.callback()));

  if (!cb1.have_result()) {
    // While the cancellation is still pending, new sparse operations must be
    // rejected.
    EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED,
              entry->ReadSparseData(
                  offset, buf.get(), kSize, net::CompletionCallback()));
    EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED,
              entry->WriteSparseData(
                  offset, buf.get(), kSize, net::CompletionCallback()));
  }

  // Now see if we receive all notifications. Note that we should not be able
  // to write everything (unless the timing of the system is really weird).
  rv = cb1.WaitForResult();
  EXPECT_TRUE(rv == 4096 || rv == kSize);
  EXPECT_EQ(net::OK, cb2.WaitForResult());
  EXPECT_EQ(net::OK, cb3.WaitForResult());
  EXPECT_EQ(net::OK, cb4.WaitForResult());

  rv = entry->GetAvailableRange(offset, kSize, &offset, cb5.callback());
  EXPECT_EQ(0, cb5.GetResult(rv));
  entry->Close();
}
| 458 |
// Tests that we perform sanity checks on an entry's key. Note that there are
// other tests that exercise sanity checks by using saved corrupt files.
TEST_P(DiskCacheBlockfileEntryTest, KeySanityCheck) {
  UseCurrentThread();
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  // Reach into the entry's on-disk record to corrupt it in place.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);
  disk_cache::EntryStore* store = entry_impl->entry()->Data();

  // We have reserved space for a short key (one block), let's say that the key
  // takes more than one block, and remove the NULLs after the actual key.
  store->key_len = 800;
  memset(store->key + key.size(), 'k', sizeof(store->key) - key.size());
  entry_impl->entry()->set_modified();
  entry->Close();

  // We have a corrupt entry. Now reload it. We should NOT read beyond the
  // allocated buffer here.
  ASSERT_NE(net::OK, OpenEntry(key, &entry));
  // NOTE(review): the upstream test disabled the teardown integrity check
  // here; this fixture does not currently expose that hook.
  // DisableIntegrityCheck();
}
| 484 |
// Backend tests.

// Tests that |BackendImpl| fails to initialize with a missing file.
TEST_P(DiskCacheBlockfileBackendTest, CreateBackend_MissingFile) {
  ASSERT_TRUE(CopyTestCache("bad_entry"));
  // Remove one of the block files the index refers to.
  base::FilePath filename = cache_path().AppendASCII("data_1");
  base::DeleteFile(filename, false);
  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
  net::TestCompletionCallback cb;

  // Constructing the backend on this thread must not perform file I/O; the
  // ThreadRestrictions guard enforces that.
  bool prev = base::ThreadRestrictions::SetIOAllowed(false);
  scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
      cache_path(), cache_thread.message_loop_proxy().get(), NULL));
  int rv = cache->Init(cb.callback());
  EXPECT_EQ(net::ERR_FAILED, cb.GetResult(rv));
  base::ThreadRestrictions::SetIOAllowed(prev);

  cache.reset();
  DisableIntegrityCheck();
}
| 507 |
// Tests that backend creation fails cleanly when the index file is truncated
// garbage. TODO(gavinp): less lame.
TEST_P(DiskCacheBlockfileBackendTest, TruncatedIndex) {
  // Replace the index with five junk bytes, far too short to be valid.
  base::FilePath index = cache_path().AppendASCII("index");
  ASSERT_EQ(5, file_util::WriteFile(index, "hello", 5));

  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
  net::TestCompletionCallback cb;

  scoped_ptr<disk_cache::Backend> backend;
  int rv =
      disk_cache::CreateCacheBackend(net::DISK_CACHE,
                                     net::CACHE_BACKEND_BLOCKFILE,
                                     cache_path(),
                                     0,
                                     false,
                                     cache_thread.message_loop_proxy().get(),
                                     NULL,
                                     &backend,
                                     cb.callback());
  ASSERT_NE(net::OK, cb.GetResult(rv));

  ASSERT_FALSE(backend);
}
| 533 |
| 534 // Tests the chaining of an entry to the current head. |
| 535 TEST_P(DiskCacheBlockfileBackendTest, Chain) { |
| 536 SetMask(0x1); // 2-entry table. |
| 537 SetMaxSize(0x3000); // 12 kB. |
| 538 InitCache(); |
| 539 |
| 540 disk_cache::Entry* entry; |
| 541 ASSERT_EQ(net::OK, CreateEntry("The first key", &entry)); |
| 542 entry->Close(); |
| 543 ASSERT_EQ(net::OK, CreateEntry("The Second key", &entry)); |
| 544 entry->Close(); |
| 545 } |
| 546 |
// Verifies the new eviction algorithm's two-list behavior: re-opened entries
// move to list 1, and trimming evicts from list 1 first (subject to the 10%
// limit), then from list 0. Skipped when running under old eviction.
TEST_P(DiskCacheBlockfileBackendTest, NewEvictionTrim) {
  TEST_DISABLED_IF(
      (blockfile_traits()->flags() & kNewEviction) != kNewEviction);
  InitCache();

  disk_cache::Entry* entry;
  for (int i = 0; i < 100; i++) {
    std::string name(base::StringPrintf("Key %d", i));
    ASSERT_EQ(net::OK, CreateEntry(name, &entry));
    entry->Close();
    if (i < 90) {
      // Entries 0 to 89 are in list 1; 90 to 99 are in list 0.
      ASSERT_EQ(net::OK, OpenEntry(name, &entry));
      entry->Close();
    }
  }

  // The first eviction must come from list 1 (10% limit), the second must come
  // from list 0.
  TrimForTest(false);
  EXPECT_NE(net::OK, OpenEntry("Key 0", &entry));
  TrimForTest(false);
  EXPECT_NE(net::OK, OpenEntry("Key 90", &entry));

  // Double check that we still have the list tails.
  ASSERT_EQ(net::OK, OpenEntry("Key 1", &entry));
  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry("Key 91", &entry));
  entry->Close();
}
| 577 |
| 578 TEST_P(DiskCacheBlockfileBackendTest, ExternalFiles) { |
| 579 InitCache(); |
| 580 // First, let's create a file on the folder. |
| 581 base::FilePath filename = cache_path().AppendASCII("f_000001"); |
| 582 |
| 583 const int kSize = 50; |
| 584 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize)); |
| 585 CacheTestFillBuffer(buffer1->data(), kSize, false); |
| 586 ASSERT_EQ(kSize, file_util::WriteFile(filename, buffer1->data(), kSize)); |
| 587 |
| 588 // Now let's create a file with the cache. |
| 589 disk_cache::Entry* entry; |
| 590 ASSERT_EQ(net::OK, CreateEntry("key", &entry)); |
| 591 ASSERT_EQ(0, WriteData(entry, 0, 20000, buffer1.get(), 0, false)); |
| 592 entry->Close(); |
| 593 |
| 594 // And verify that the first file is still there. |
| 595 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize)); |
| 596 ASSERT_EQ(kSize, base::ReadFile(filename, buffer2->data(), kSize)); |
| 597 EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kSize)); |
| 598 } |
| 599 |
// Before looking for invalid entries, let's check a valid entry.
// A fully written and closed entry must survive a simulated crash intact.
TEST_P(DiskCacheBlockfileBackendTest, ValidEntry) {
  InitCache();

  std::string key("Some key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  memset(buffer1->data(), 0, kSize);
  base::strlcpy(buffer1->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer1.get(), kSize, false));
  // Closing before the crash is what makes the entry valid.
  entry->Close();
  SimulateCrash();

  ASSERT_EQ(net::OK, OpenEntry(key, &entry));

  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
  memset(buffer2->data(), 0, kSize);
  EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer2.get(), kSize));
  entry->Close();
  EXPECT_STREQ(buffer1->data(), buffer2->data());
}
| 624 |
// The same logic of the previous test (ValidEntry), but this time force the
// entry to be invalid, simulating a crash in the middle.
// The entry is never closed before the crash, so it must be discarded.
// We'll be leaking memory from this test.
TEST_P(DiskCacheBlockfileBackendTest, InvalidEntry) {
  InitCache();

  std::string key("Some key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  memset(buffer->data(), 0, kSize);
  base::strlcpy(buffer->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
  SimulateCrash();

  EXPECT_NE(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(0, cache()->GetEntryCount());
}
| 645 |
// Almost the same test, but this time crash the cache after reading an entry.
// APP_CACHE differs: reads do not mark the entry dirty there, so it survives.
// We'll be leaking memory from this test.
TEST_P(DiskCacheBlockfileBackendTest, InvalidEntryRead) {
  InitCache();

  std::string key("Some key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  memset(buffer->data(), 0, kSize);
  base::strlcpy(buffer->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer.get(), kSize));

  // Crash with the entry still open after a read.
  SimulateCrash();

  if (cache_impl()->cache_type() == net::APP_CACHE) {
    // Reading an entry and crashing should not make it dirty.
    ASSERT_EQ(net::OK, OpenEntry(key, &entry));
    EXPECT_EQ(1, cache()->GetEntryCount());
    entry->Close();
  } else {
    EXPECT_NE(net::OK, OpenEntry(key, &entry));
    EXPECT_EQ(0, cache()->GetEntryCount());
  }
}
| 676 |
// We'll be leaking memory from this test.
TEST_P(DiskCacheBlockfileBackendTest, InvalidEntryWithLoad) {
  // Work with a tiny index table (2 entries) to force heavy bucket chaining.
  SetMask(0x1);  // 2-entry table.
  SetMaxSize(0x100000);
  InitCache();

  // Log the seed so a failing shuffle can be reproduced.
  int seed = static_cast<int>(base::Time::Now().ToInternalValue());
  srand(seed);

  const int kNumEntries = 100;
  disk_cache::Entry* entries[kNumEntries];
  for (int i = 0; i < kNumEntries; i++) {
    std::string key = GenerateKey(true);
    ASSERT_EQ(net::OK, CreateEntry(key, &entries[i]));
  }
  EXPECT_EQ(kNumEntries, cache()->GetEntryCount());

  // Shuffle so that the open/closed split below is random.
  for (int i = 0; i < kNumEntries; i++) {
    int source1 = rand() % kNumEntries;
    int source2 = rand() % kNumEntries;
    disk_cache::Entry* temp = entries[source1];
    entries[source1] = entries[source2];
    entries[source2] = temp;
  }

  // Close the first half; leave the second half open across the crash so
  // those entries become dirty.
  std::string keys[kNumEntries];
  for (int i = 0; i < kNumEntries; i++) {
    keys[i] = entries[i]->GetKey();
    if (i < kNumEntries / 2)
      entries[i]->Close();
  }

  SimulateCrash();

  // Entries left open must have been discarded...
  for (int i = kNumEntries / 2; i < kNumEntries; i++) {
    disk_cache::Entry* entry;
    EXPECT_NE(net::OK, OpenEntry(keys[i], &entry));
  }

  // ...while properly closed entries survive.
  for (int i = 0; i < kNumEntries / 2; i++) {
    disk_cache::Entry* entry;
    ASSERT_EQ(net::OK, OpenEntry(keys[i], &entry));
    entry->Close();
  }

  EXPECT_EQ(kNumEntries / 2, cache()->GetEntryCount());
}
| 725 |
// Verifies that an entry left dirty by a crash is evicted when the cache is
// trimmed down to a size that cannot hold it.
TEST_P(DiskCacheBlockfileBackendTest, TrimInvalidEntry) {
  const int kSize = 0x3000;  // 12 kB
  SetMaxSize(kSize * 10);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(first, &entry));

  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  memset(buffer->data(), 0, kSize);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));

  // Simulate a crash.
  SimulateCrash();

  ASSERT_EQ(net::OK, CreateEntry(second, &entry));
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));

  EXPECT_EQ(2, cache()->GetEntryCount());
  // Shrinking the limit below the current usage triggers eviction on Close().
  SetMaxSize(kSize);
  entry->Close();  // Trim the cache.
  FlushQueueForTest();

  // If we evicted the entry in less than 20mS, we have one entry in the cache;
  // if it took more than that, we posted a task and we'll delete the second
  // entry too.
  base::MessageLoop::current()->RunUntilIdle();

  // This may be not thread-safe in general, but for now it's OK so add some
  // ThreadSanitizer annotations to ignore data races on cache_.
  // See http://crbug.com/55970
  ANNOTATE_IGNORE_READS_BEGIN();
  EXPECT_GE(1, cache()->GetEntryCount());
  ANNOTATE_IGNORE_READS_END();

  EXPECT_NE(net::OK, OpenEntry(first, &entry));
}
| 765 |
// Same idea as TrimInvalidEntry, but with many chained dirty entries.
// We'll be leaking memory from this test.
TEST_P(DiskCacheBlockfileBackendTest, TrimInvalidEntry2) {
  SetMask(0xf);  // 16-entry table.

  const int kSize = 0x3000;  // 12 kB
  SetMaxSize(kSize * 40);
  InitCache();

  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  memset(buffer->data(), 0, kSize);
  disk_cache::Entry* entry;

  // Writing 32 entries to this cache chains most of them.
  for (int i = 0; i < 32; i++) {
    std::string key(base::StringPrintf("some key %d", i));
    ASSERT_EQ(net::OK, CreateEntry(key, &entry));
    EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
    entry->Close();
    ASSERT_EQ(net::OK, OpenEntry(key, &entry));
    // Note that we are not closing the entries.
  }

  // Simulate a crash.
  SimulateCrash();

  ASSERT_EQ(net::OK, CreateEntry("Something else", &entry));
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));

  FlushQueueForTest();
  EXPECT_EQ(33, cache()->GetEntryCount());
  SetMaxSize(kSize);

  // For the new eviction code, all corrupt entries are on the second list so
  // they are not going away that easy.
  // NOTE(review): DoomAllEntries() currently runs for both algorithms; the
  // original guarded it on new eviction only (commented out below).
  // if (new_eviction_) {
  EXPECT_EQ(net::OK, DoomAllEntries());
  //}

  entry->Close();  // Trim the cache.
  FlushQueueForTest();

  // We may abort the eviction before cleaning up everything.
  base::MessageLoop::current()->RunUntilIdle();
  FlushQueueForTest();
  // If it's not clear enough: we may still have eviction tasks running at this
  // time, so the number of entries is changing while we read it.
  ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
  EXPECT_GE(30, cache()->GetEntryCount());
  ANNOTATE_IGNORE_READS_AND_WRITES_END();
}
| 816 |
// Replays a pre-built cache snapshot |name| that was captured in the middle of
// an interrupted operation and verifies that the backend recovers: the
// interrupted entry ("the first key") must be gone while |num_entries| valid
// entries survive. |load| selects the heavy-load variant (tiny index table,
// larger maximum size). The commented-out lines are leftovers from the
// pre-parameterized version of this helper.
void BackendTransaction(DiskCacheBlockfileBackendTest* test,
                        const std::string& name,
                        int num_entries, bool load) {
  // success_ = false;
  ASSERT_TRUE(test->CopyTestCache(name));
  // DisableFirstCleanup();

  uint32 mask;
  if (load) {
    mask = 0xf;  // 16-entry table to force hash-bucket collisions.
    test->SetMaxSize(0x100000);
  } else {
    // Clear the settings from the previous run.
    mask = 0;
    test->SetMaxSize(0);
  }
  test->SetMask(mask);

  test->InitCache();
  ASSERT_EQ(num_entries + 1, test->cache()->GetEntryCount());

  std::string key("the first key");
  disk_cache::Entry* entry1;
  // The entry that was mid-operation must have been discarded on recovery.
  ASSERT_NE(net::OK, test->OpenEntry(key, &entry1));

  int actual = test->cache()->GetEntryCount();
  if (num_entries != actual) {
    ASSERT_TRUE(load);
    // If there is a heavy load, inserting an entry will make another entry
    // dirty (on the hash bucket) so two entries are removed.
    ASSERT_EQ(num_entries - 1, actual);
  }

  // test->cache().reset();
  // cache_impl_ = NULL;

  // ASSERT_TRUE(CheckCacheIntegrity(cache_path_, new_eviction_, mask));
  //success_ = true;
}
| 856 |
| 857 TEST_P(DiskCacheBlockfileBackendTest, RecoverInsert) { |
| 858 // Tests with an empty cache. |
| 859 ASSERT_NO_FATAL_FAILURE(BackendTransaction(this, "insert_empty1", 0, false)); |
| 860 ASSERT_NO_FATAL_FAILURE(BackendTransaction(this, "insert_empty2", 0, false)); |
| 861 ASSERT_NO_FATAL_FAILURE(BackendTransaction(this, "insert_empty3", 0, false)); |
| 862 |
| 863 // Tests with one entry on the cache. |
| 864 ASSERT_NO_FATAL_FAILURE(BackendTransaction(this, "insert_one1", 1, false)); |
| 865 ASSERT_NO_FATAL_FAILURE(BackendTransaction(this, "insert_one2", 1, false)); |
| 866 ASSERT_NO_FATAL_FAILURE(BackendTransaction(this, "insert_one3", 1, false)); |
| 867 |
| 868 // Tests with one hundred entries on the cache, tiny index. |
| 869 ASSERT_NO_FATAL_FAILURE(BackendTransaction(this, "insert_load1", 100, true)); |
| 870 ASSERT_NO_FATAL_FAILURE(BackendTransaction(this, "insert_load2", 100, true)); |
| 871 } |
| 872 |
| 873 TEST_P(DiskCacheBlockfileBackendTest, RecoverRemove) { |
| 874 // Removing the only element. |
| 875 ASSERT_NO_FATAL_FAILURE(BackendTransaction(this, "remove_one1", 0, false)); |
| 876 ASSERT_NO_FATAL_FAILURE(BackendTransaction(this, "remove_one2", 0, false)); |
| 877 ASSERT_NO_FATAL_FAILURE(BackendTransaction(this, "remove_one3", 0, false)); |
| 878 |
| 879 // Removing the head. |
| 880 ASSERT_NO_FATAL_FAILURE(BackendTransaction(this, "remove_head1", 1, false)); |
| 881 ASSERT_NO_FATAL_FAILURE(BackendTransaction(this, "remove_head2", 1, false)); |
| 882 ASSERT_NO_FATAL_FAILURE(BackendTransaction(this, "remove_head3", 1, false)); |
| 883 |
| 884 // Removing the tail. |
| 885 ASSERT_NO_FATAL_FAILURE(BackendTransaction(this, "remove_tail1", 1, false)); |
| 886 ASSERT_NO_FATAL_FAILURE(BackendTransaction(this, "remove_tail2", 1, false)); |
| 887 ASSERT_NO_FATAL_FAILURE(BackendTransaction(this, "remove_tail3", 1, false)); |
| 888 |
| 889 // Removing with one hundred entries on the cache, tiny index. |
| 890 ASSERT_NO_FATAL_FAILURE(BackendTransaction(this, "remove_load1", 100, true)); |
| 891 ASSERT_NO_FATAL_FAILURE(BackendTransaction(this, "remove_load2", 100, true)); |
| 892 ASSERT_NO_FATAL_FAILURE(BackendTransaction(this, "remove_load3", 100, true)); |
| 893 |
| 894 // This case cannot be reverted. |
| 895 ASSERT_NO_FATAL_FAILURE(BackendTransaction(this, "remove_one4", 0, false)); |
| 896 ASSERT_NO_FATAL_FAILURE(BackendTransaction(this, "remove_head4", 1, false)); |
| 897 } |
| 898 |
// Loads a hundred-entry snapshot with a maximum size small enough that
// recovery has to evict during startup; none of that should crash.
TEST_P(DiskCacheBlockfileBackendTest, RecoverWithEviction) {
  ASSERT_TRUE(CopyTestCache("insert_load1"));
  //DisableFirstCleanup();

  SetMask(0xf);  // 16-entry table.
  SetMaxSize(0x1000);  // 4 kB: far below the snapshot's contents.

  // We should not crash here.
  InitCache();
  DisableIntegrityCheck();
}
| 910 |
| 911 // Tests that the |BackendImpl| fails to start with the wrong cache version. |
| 912 TEST_P(DiskCacheBlockfileBackendTest, WrongVersion) { |
| 913 ASSERT_TRUE(CopyTestCache("wrong_version")); |
| 914 base::Thread cache_thread("CacheThread"); |
| 915 ASSERT_TRUE(cache_thread.StartWithOptions( |
| 916 base::Thread::Options(base::MessageLoop::TYPE_IO, 0))); |
| 917 net::TestCompletionCallback cb; |
| 918 |
| 919 scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl( |
| 920 cache_path(), cache_thread.message_loop_proxy().get(), NULL)); |
| 921 int rv = cache->Init(cb.callback()); |
| 922 ASSERT_EQ(net::ERR_FAILED, cb.GetResult(rv)); |
| 923 } |
| 924 |
#if 0
// Tests that the cache is properly restarted on recovery error.
// NOTE(review): disabled while porting. Before re-enabling, confirm the
// CreateCacheBackend() output argument: `cache()` is passed where the API
// expects a pointer to the backend slot — TODO verify against the current
// signature. The integrity check at the end is also still commented out.
TEST_P(DiskCacheBlockfileBackendTest, DeleteOld) {
  ASSERT_TRUE(CopyTestCache("wrong_version"));
  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));

  net::TestCompletionCallback cb;
  // Disallow file I/O on this thread to prove the backend does its disk work
  // on |cache_thread|.
  bool prev = base::ThreadRestrictions::SetIOAllowed(false);
  base::FilePath path(cache_path());
  int rv =
      disk_cache::CreateCacheBackend(net::DISK_CACHE,
                                     net::CACHE_BACKEND_BLOCKFILE,
                                     path,
                                     0,
                                     true,
                                     cache_thread.message_loop_proxy().get(),
                                     NULL,
                                     cache(),
                                     cb.callback());
  path.clear();  // Make sure path was captured by the previous call.
  ASSERT_EQ(net::OK, cb.GetResult(rv));
  base::ThreadRestrictions::SetIOAllowed(prev);
  // cache_.reset();
  //EXPECT_TRUE(CheckCacheIntegrity(cache_path(), new_eviction_, mask_));
}
#endif
| 953 |
// We want to be able to deal with messed up entries on disk: the known-bad
// entry must fail to open without taking down the rest of the cache.
TEST_P(DiskCacheBlockfileBackendTest, InvalidEntry2) {
  ASSERT_TRUE(CopyTestCache("bad_entry"));
  //DisableFirstCleanup();
  InitCache();

  disk_cache::Entry *entry1, *entry2;
  // The intact entry opens normally; the corrupt one is rejected.
  ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1));
  EXPECT_NE(net::OK, OpenEntry("some other key", &entry2));
  entry1->Close();

  // CheckCacheIntegrity will fail at this point.
  // NOTE(review): the disable call below is commented out while porting —
  // confirm whether the fixture still runs an integrity check on teardown.
  //DisableIntegrityCheck();
}
| 968 |
| 969 // Tests that we don't crash or hang when enumerating this cache. |
| 970 TEST_P(DiskCacheBlockfileBackendTest, InvalidEntry3) { |
| 971 SetMask(0x1); // 2-entry table. |
| 972 SetMaxSize(0x3000); // 12 kB. |
| 973 InitCache(); |
| 974 |
| 975 disk_cache::Entry* entry; |
| 976 void* iter = NULL; |
| 977 while (OpenNextEntry(&iter, &entry) == net::OK) { |
| 978 entry->Close(); |
| 979 } |
| 980 } |
| 981 |
// Test that we handle a dirty entry on the LRU list, already replaced with
// the same key, and with hash collisions.
TEST_P(DiskCacheBlockfileBackendTest, InvalidEntry4) {
  ASSERT_TRUE(CopyTestCache("dirty_entry3"));
  SetMask(0x1);  // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  //DisableFirstCleanup();
  InitCache();

  // Eviction must cope with the dirty duplicate without crashing.
  TrimForTest(false);
}
| 993 |
// Test that we handle a dirty entry on the deleted list, already replaced with
// the same key, and with hash collisions. Disabled: this scenario depends on
// new-eviction mode (note the commented-out SetNewEviction() call).
TEST_P(DiskCacheBlockfileBackendTest, DISABLED_InvalidEntry5) {
  ASSERT_TRUE(CopyTestCache("dirty_entry4"));
  // SetNewEviction();
  SetMask(0x1);  // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  //DisableFirstCleanup();
  InitCache();

  // Trimming the deleted list must tolerate the dirty duplicate.
  TrimDeletedListForTest(false);
}
| 1006 |
// Tests eviction when a deleted entry is still reachable through a hash
// collision chain of a seemingly clean entry.
TEST_P(DiskCacheBlockfileBackendTest, InvalidEntry6) {
  ASSERT_TRUE(CopyTestCache("dirty_entry5"));
  SetMask(0x1);  // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  InitCache();

  // There is a dirty entry (but marked as clean) at the end, pointing to a
  // deleted entry through the hash collision list. We should not re-insert the
  // deleted entry into the index table.

  TrimForTest(false);
  // The cache should be clean (as detected by CheckCacheIntegrity).
}
| 1020 |
// Tests that we don't hang when there is a loop on the hash collision list.
// The test cache could be a result of bug 69135.
TEST_P(DiskCacheBlockfileBackendTest, BadNextEntry1) {
  ASSERT_TRUE(CopyTestCache("list_loop2"));
  SetMask(0x1);  // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  InitCache();

  // The second entry points at itself, and the first entry is not accessible
  // through the index, but it is at the head of the LRU.

  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry("The first key", &entry));
  entry->Close();

  // Trim twice to dispose of both broken entries; the entry we just created
  // must be the only survivor.
  TrimForTest(false);
  TrimForTest(false);
  ASSERT_EQ(net::OK, OpenEntry("The first key", &entry));
  entry->Close();
  EXPECT_EQ(1, cache()->GetEntryCount());
}
| 1042 |
| 1043 // Tests that we don't hang when there is a loop on the hash collision list. |
| 1044 // The test cache could be a result of bug 69135. |
| 1045 TEST_P(DiskCacheBlockfileBackendTest, BadNextEntry2) { |
| 1046 ASSERT_TRUE(CopyTestCache("list_loop3")); |
| 1047 SetMask(0x1); // 2-entry table. |
| 1048 SetMaxSize(0x3000); // 12 kB. |
| 1049 InitCache(); |
| 1050 |
| 1051 // There is a wide loop of 5 entries. |
| 1052 |
| 1053 disk_cache::Entry* entry; |
| 1054 ASSERT_NE(net::OK, OpenEntry("Not present key", &entry)); |
| 1055 } |
| 1056 |
// A dirty entry in the rankings data should be removable without corrupting
// the list. Disabled: requires new-eviction mode (note the commented-out
// SetNewEviction() call).
TEST_P(DiskCacheBlockfileBackendTest, DISABLED_NewEvictionInvalidEntry6) {
  ASSERT_TRUE(CopyTestCache("bad_rankings3"));
  //DisableFirstCleanup();
  //SetNewEviction();
  InitCache();

  // The second entry is dirty, but removing it should not corrupt the list.
  disk_cache::Entry* entry;
  ASSERT_NE(net::OK, OpenEntry("the second key", &entry));
  ASSERT_EQ(net::OK, OpenEntry("the first key", &entry));

  // This should not delete the cache.
  entry->Doom();
  FlushQueueForTest();
  entry->Close();

  ASSERT_EQ(net::OK, OpenEntry("some other key", &entry));
  entry->Close();
}
| 1076 |
// Tests handling of corrupt entries by keeping the rankings node around, with
// a fatal failure.
TEST_P(DiskCacheBlockfileBackendTest, InvalidEntry7) {
  const int kSize = 0x3000;  // 12 kB.
  SetMaxSize(kSize * 10);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(first, &entry));
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry(second, &entry));

  // Corrupt this entry: zeroing the rankings |next| pointer and writing it
  // back produces an inconsistency the backend treats as fatal.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);

  entry_impl->rankings()->Data()->next = 0;
  entry_impl->rankings()->Store();
  entry->Close();
  FlushQueueForTest();
  EXPECT_EQ(2, cache()->GetEntryCount());

  // This should detect the bad entry.
  EXPECT_NE(net::OK, OpenEntry(second, &entry));
  EXPECT_EQ(1, cache()->GetEntryCount());

  // We should delete the cache. The list still has a corrupt node.
  void* iter = NULL;
  EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
  FlushQueueForTest();
  EXPECT_EQ(0, cache()->GetEntryCount());
}
| 1111 |
// Tests handling of corrupt entries by keeping the rankings node around, with
// a non fatal failure.
TEST_P(DiskCacheBlockfileBackendTest, InvalidEntry8) {
  const int kSize = 0x3000;  // 12 kB
  SetMaxSize(kSize * 10);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(first, &entry));
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry(second, &entry));

  // Corrupt this entry: zeroing the rankings |contents| pointer is a milder
  // inconsistency than zapping |next| (see InvalidEntry7 above).
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);

  entry_impl->rankings()->Data()->contents = 0;
  entry_impl->rankings()->Store();
  entry->Close();
  FlushQueueForTest();
  EXPECT_EQ(2, cache()->GetEntryCount());

  // This should detect the bad entry.
  EXPECT_NE(net::OK, OpenEntry(second, &entry));
  EXPECT_EQ(1, cache()->GetEntryCount());

  // We should not delete the cache.
  void* iter = NULL;
  ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
  entry->Close();
  EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
  EXPECT_EQ(1, cache()->GetEntryCount());
}
| 1147 |
#if 0
// Tests handling of corrupt entries detected by enumerations. Disabled while
// porting: this is still written as a DiskCacheBackendTest helper and has not
// been converted to the parameterized fixture used by the rest of this file.
void DiskCacheBackendTest::BackendInvalidEntry10(bool eviction) {
  const int kSize = 0x3000;  // 12 kB.
  SetMaxSize(kSize * 10);
  SetNewEviction();
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(first, &entry));
  entry->Close();
  // Touch the first entry again so it moves to the second (reused) list.
  ASSERT_EQ(net::OK, OpenEntry(first, &entry));
  EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry(second, &entry));

  // Corrupt this entry.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);

  entry_impl->entry()->Data()->state = 0xbad;
  entry_impl->entry()->Store();
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry("third", &entry));
  entry->Close();
  EXPECT_EQ(3, cache()->GetEntryCount());

  // We have:
  // List 0: third -> second (bad).
  // List 1: first.

  if (eviction) {
    // Detection order: second -> first -> third.
    TrimForTest(false);
    EXPECT_EQ(3, cache()->GetEntryCount());
    TrimForTest(false);
    EXPECT_EQ(2, cache()->GetEntryCount());
    TrimForTest(false);
    EXPECT_EQ(1, cache()->GetEntryCount());
  } else {
    // Detection order: third -> second -> first.
    // We should detect the problem through the list, but we should not delete
    // the entry.
    void* iter = NULL;
    ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
    entry->Close();
    ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
    EXPECT_EQ(first, entry->GetKey());
    entry->Close();
    EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
  }
  DisableIntegrityCheck();
}
#endif
| 1204 |
// Tests handling of corrupt entries in the middle of a long eviction run.
TEST_P(DiskCacheBlockfileBackendTest, TrimInvalidEntry12) {
  const int kSize = 0x3000;  // 12 kB
  SetMaxSize(kSize * 10);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(first, &entry));
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry(second, &entry));

  // Corrupt this entry by writing an invalid state value to disk.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);

  entry_impl->entry()->Data()->state = 0xbad;
  entry_impl->entry()->Store();
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry("third", &entry));
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry("fourth", &entry));
  // An empty-the-cache trim must get past the corrupt entry; only the
  // still-open "fourth" entry survives.
  TrimForTest(true);
  EXPECT_EQ(1, cache()->GetEntryCount());
  entry->Close();
  DisableIntegrityCheck();
}
| 1233 |
#if 0
// If the LRU is corrupt, we delete the cache. Disabled while porting: this is
// still written as a DiskCacheBackendTest helper and has not been converted to
// the parameterized fixture used by the rest of this file.
void DiskCacheBackendTest::BackendInvalidRankings() {
  disk_cache::Entry* entry;
  void* iter = NULL;
  ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
  entry->Close();
  EXPECT_EQ(2, cache()->GetEntryCount());

  EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
  FlushQueueForTest();  // Allow the restart to finish.
  EXPECT_EQ(0, cache()->GetEntryCount());
}
#endif
| 1248 |
// We want to be able to deal with messed up entries on disk.
TEST_P(DiskCacheBlockfileBackendTest, InvalidRankings2) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  InitCache();

  disk_cache::Entry *entry1, *entry2;
  // The entry with the broken rankings data must fail to open; the other one
  // stays usable.
  EXPECT_NE(net::OK, OpenEntry("the first key", &entry1));
  ASSERT_EQ(net::OK, OpenEntry("some other key", &entry2));
  entry2->Close();

  // CheckCacheIntegrity will fail at this point.
  DisableIntegrityCheck();
}
| 1262 |
// If the index size changes when we disable the cache, we should not crash.
// NOTE(review): the test name misspells "Success"; renaming would change the
// registered gtest id (and break any --gtest_filter that matches it), so it
// is left as-is.
TEST_P(DiskCacheBlockfileBackendTest, DisableSucess3) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  InitCache();

  disk_cache::Entry *entry1, *entry2;
  void* iter = NULL;
  EXPECT_EQ(2, cache()->GetEntryCount());
  ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry1));
  entry1->Close();

  // This enumeration step fails on the bad rankings data.
  EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry2));
  FlushQueueForTest();

  // The cache is usable again: a fresh entry can be created and is the only
  // one left.
  ASSERT_EQ(net::OK, CreateEntry("Something new", &entry2));
  entry2->Close();

  EXPECT_EQ(1, cache()->GetEntryCount());
}
| 1282 |
// If we disable the cache, already open entries should work as far as possible.
TEST_P(DiskCacheBlockfileBackendTest, DisableSuccess4) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  InitCache();

  disk_cache::Entry *entry1, *entry2, *entry3, *entry4;
  void* iter = NULL;
  ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry1));

  // Create two entries with large random keys (2 kB and 20 kB).
  char key2[2000];
  char key3[20000];
  CacheTestFillBuffer(key2, sizeof(key2), true);
  CacheTestFillBuffer(key3, sizeof(key3), true);
  key2[sizeof(key2) - 1] = '\0';
  key3[sizeof(key3) - 1] = '\0';
  ASSERT_EQ(net::OK, CreateEntry(key2, &entry2));
  ASSERT_EQ(net::OK, CreateEntry(key3, &entry3));

  const int kBufSize = 20000;
  scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kBufSize));
  memset(buf->data(), 0, kBufSize);
  EXPECT_EQ(100, WriteData(entry2, 0, 0, buf.get(), 100, false));
  EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false));

  // This line should disable the cache but not delete it.
  EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry4));
  EXPECT_EQ(0, cache()->GetEntryCount());

  EXPECT_NE(net::OK, CreateEntry("cache is disabled", &entry4));

  // The entries opened before the failure must remain readable and writable.
  EXPECT_EQ(100, ReadData(entry2, 0, 0, buf.get(), 100));
  EXPECT_EQ(100, WriteData(entry2, 0, 0, buf.get(), 100, false));
  EXPECT_EQ(100, WriteData(entry2, 1, 0, buf.get(), 100, false));

  EXPECT_EQ(kBufSize, ReadData(entry3, 0, 0, buf.get(), kBufSize));
  EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false));
  EXPECT_EQ(kBufSize, WriteData(entry3, 1, 0, buf.get(), kBufSize, false));

  // Their keys must also still be retrievable.
  std::string key = entry2->GetKey();
  EXPECT_EQ(sizeof(key2) - 1, key.size());
  key = entry3->GetKey();
  EXPECT_EQ(sizeof(key3) - 1, key.size());

  entry1->Close();
  entry2->Close();
  entry3->Close();
  FlushQueueForTest();  // Flushing the Close posts a task to restart the cache.
  FlushQueueForTest();  // This one actually allows that task to complete.

  EXPECT_EQ(0, cache()->GetEntryCount());
}
| 1334 |
// Starts a backend in unit-test mode and pumps the message loop so the usage
// stats timer gets a chance to run without crashing.
TEST_P(DiskCacheBlockfileBackendTest, UsageStatsTimer) {
  MessageLoopHelper helper;

  ASSERT_TRUE(CleanupCacheDir());
  scoped_ptr<disk_cache::BackendImpl> cache;
  cache.reset(new disk_cache::BackendImpl(
      cache_path(), base::MessageLoopProxy::current().get(), NULL));
  ASSERT_TRUE(NULL != cache.get());
  cache->SetUnitTestMode();
  ASSERT_EQ(net::OK, cache->SyncInit());

  // Wait for a callback that never comes... about 2 secs :). The message loop
  // has to run to allow invocation of the usage timer.
  helper.WaitUntilCacheIoFinished(1);
}
| 1350 |
| 1351 TEST_P(DiskCacheBlockfileBackendTest, TimerNotCreated) { |
| 1352 ASSERT_TRUE(CopyTestCache("wrong_version")); |
| 1353 |
| 1354 scoped_ptr<disk_cache::BackendImpl> cache; |
| 1355 cache.reset(new disk_cache::BackendImpl( |
| 1356 cache_path(), base::MessageLoopProxy::current().get(), NULL)); |
| 1357 ASSERT_TRUE(NULL != cache.get()); |
| 1358 cache->SetUnitTestMode(); |
| 1359 ASSERT_NE(net::OK, cache->SyncInit()); |
| 1360 |
| 1361 ASSERT_TRUE(NULL == cache->GetTimerForTest()); |
| 1362 |
| 1363 DisableIntegrityCheck(); |
| 1364 } |
| 1365 |
// Creates an entry, checks the "Create hit" stats counter, then re-initializes
// the cache and verifies the counter survived.
// NOTE(review): the second InitCache() runs without resetting the first
// backend (the reset is commented out while porting) — confirm this is the
// intended sequence.
TEST_P(DiskCacheBlockfileBackendTest, UsageStats) {
  InitCache();
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry("key", &entry));
  entry->Close();
  FlushQueueForTest();

  disk_cache::StatsItems stats;
  cache()->GetStats(&stats);
  EXPECT_FALSE(stats.empty());

  // Exactly one successful create should have been recorded.
  disk_cache::StatsItems::value_type hits("Create hit", "0x1");
  EXPECT_EQ(1, std::count(stats.begin(), stats.end(), hits));

  // cache().reset();

  // Now open the cache and verify that the stats are still there.
  //DisableFirstCleanup();

  InitCache();
  EXPECT_EQ(1, cache()->GetEntryCount());

  stats.clear();
  cache()->GetStats(&stats);
  EXPECT_FALSE(stats.empty());

  EXPECT_EQ(1, std::count(stats.begin(), stats.end(), hits));
}
| 1394 |
// If the index size changes when we doom the cache, we should not crash.
TEST_P(DiskCacheBlockfileBackendTest, DoomAll2) {
  ASSERT_TRUE(CopyTestCache("bad_rankings2"));
  SetMaxSize(20 * 1024 * 1024);
  InitCache();

  EXPECT_EQ(2, cache()->GetEntryCount());
  EXPECT_EQ(net::OK, DoomAllEntries());

  // The cache must still be usable after the doom.
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry("Something new", &entry));
  entry->Close();

  EXPECT_EQ(1, cache()->GetEntryCount());
}
| 1410 |
// We should be able to create the same entry on multiple simultaneous instances
// of the cache.
TEST_P(DiskCacheBlockfileBackendTest, MultipleInstances) {
  base::ScopedTempDir store1, store2;
  ASSERT_TRUE(store1.CreateUniqueTempDir());
  ASSERT_TRUE(store2.CreateUniqueTempDir());

  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
  net::TestCompletionCallback cb;

  const int kNumberOfCaches = 2;
  scoped_ptr<disk_cache::Backend> cache[kNumberOfCaches];

  // Create two backends of different cache types, each in its own directory.
  int rv =
      disk_cache::CreateCacheBackend(net::DISK_CACHE,
                                     net::CACHE_BACKEND_DEFAULT,
                                     store1.path(),
                                     0,
                                     false,
                                     cache_thread.message_loop_proxy().get(),
                                     NULL,
                                     &cache[0],
                                     cb.callback());
  ASSERT_EQ(net::OK, cb.GetResult(rv));
  rv = disk_cache::CreateCacheBackend(net::MEDIA_CACHE,
                                      net::CACHE_BACKEND_DEFAULT,
                                      store2.path(),
                                      0,
                                      false,
                                      cache_thread.message_loop_proxy().get(),
                                      NULL,
                                      &cache[1],
                                      cb.callback());
  ASSERT_EQ(net::OK, cb.GetResult(rv));

  ASSERT_TRUE(cache[0].get() != NULL && cache[1].get() != NULL);

  // Creating the same key on each instance must succeed independently.
  std::string key("the first key");
  disk_cache::Entry* entry;
  for (int i = 0; i < kNumberOfCaches; i++) {
    rv = cache[i]->CreateEntry(key, &entry, cb.callback());
    ASSERT_EQ(net::OK, cb.GetResult(rv));
    entry->Close();
  }
}
| 1458 |
| 1459 // Test the six regions of the curve that determines the max cache size. |
| 1460 TEST_P(DiskCacheBlockfileBackendTest, AutomaticMaxSize) { |
| 1461 using disk_cache::kDefaultCacheSize; |
| 1462 int64 large_size = kDefaultCacheSize; |
| 1463 |
| 1464 // Region 1: expected = available * 0.8 |
| 1465 EXPECT_EQ((kDefaultCacheSize - 1) * 8 / 10, |
| 1466 disk_cache::PreferredCacheSize(large_size - 1)); |
| 1467 EXPECT_EQ(kDefaultCacheSize * 8 / 10, |
| 1468 disk_cache::PreferredCacheSize(large_size)); |
| 1469 EXPECT_EQ(kDefaultCacheSize - 1, |
| 1470 disk_cache::PreferredCacheSize(large_size * 10 / 8 - 1)); |
| 1471 |
| 1472 // Region 2: expected = default_size |
| 1473 EXPECT_EQ(kDefaultCacheSize, |
| 1474 disk_cache::PreferredCacheSize(large_size * 10 / 8)); |
| 1475 EXPECT_EQ(kDefaultCacheSize, |
| 1476 disk_cache::PreferredCacheSize(large_size * 10 - 1)); |
| 1477 |
| 1478 // Region 3: expected = available * 0.1 |
| 1479 EXPECT_EQ(kDefaultCacheSize, |
| 1480 disk_cache::PreferredCacheSize(large_size * 10)); |
| 1481 EXPECT_EQ((kDefaultCacheSize * 25 - 1) / 10, |
| 1482 disk_cache::PreferredCacheSize(large_size * 25 - 1)); |
| 1483 |
| 1484 // Region 4: expected = default_size * 2.5 |
| 1485 EXPECT_EQ(kDefaultCacheSize * 25 / 10, |
| 1486 disk_cache::PreferredCacheSize(large_size * 25)); |
| 1487 EXPECT_EQ(kDefaultCacheSize * 25 / 10, |
| 1488 disk_cache::PreferredCacheSize(large_size * 100 - 1)); |
| 1489 EXPECT_EQ(kDefaultCacheSize * 25 / 10, |
| 1490 disk_cache::PreferredCacheSize(large_size * 100)); |
| 1491 EXPECT_EQ(kDefaultCacheSize * 25 / 10, |
| 1492 disk_cache::PreferredCacheSize(large_size * 250 - 1)); |
| 1493 |
| 1494 // Region 5: expected = available * 0.1 |
| 1495 int64 largest_size = kDefaultCacheSize * 4; |
| 1496 EXPECT_EQ(kDefaultCacheSize * 25 / 10, |
| 1497 disk_cache::PreferredCacheSize(large_size * 250)); |
| 1498 EXPECT_EQ(largest_size - 1, |
| 1499 disk_cache::PreferredCacheSize(largest_size * 100 - 1)); |
| 1500 |
| 1501 // Region 6: expected = largest possible size |
| 1502 EXPECT_EQ(largest_size, |
| 1503 disk_cache::PreferredCacheSize(largest_size * 100)); |
| 1504 EXPECT_EQ(largest_size, |
| 1505 disk_cache::PreferredCacheSize(largest_size * 10000)); |
| 1506 } |
| 1507 |
// Tests that we can "migrate" a running instance from one experiment group to
// another.
TEST_P(DiskCacheBlockfileBackendTest, Histograms) {
  InitCache();
  // The member-style name is required: the CACHE_UMA macro expands to code
  // referencing |backend_| (see CACHE_HISTOGRAM_MACROS_BACKEND_IMPL_OBJ at
  // the top of this file).
  disk_cache::BackendImpl* backend_ = cache_impl();  // Needed by the macro.

  for (int i = 1; i < 3; i++) {
    CACHE_UMA(HOURS, "FillupTime", i, 28);
  }
}
| 1518 |
// Make sure that we keep the total memory used by the internal buffers under
// control.
TEST_P(DiskCacheBlockfileBackendTest, TotalBuffersSize1) {
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 200;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer->data(), kSize, true);

  // Repeatedly grow and shrink the entry's internal buffers.
  for (int i = 0; i < 10; i++) {
    SCOPED_TRACE(i);
    // Allocate 2MB for this entry.
    EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, true));
    EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer.get(), kSize, true));
    EXPECT_EQ(kSize,
              WriteData(entry, 0, 1024 * 1024, buffer.get(), kSize, false));
    EXPECT_EQ(kSize,
              WriteData(entry, 1, 1024 * 1024, buffer.get(), kSize, false));

    // Delete one of the buffers and truncate the other.
    EXPECT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, true));
    EXPECT_EQ(0, WriteData(entry, 1, 10, buffer.get(), 0, true));

    // Delete the second buffer, writing 10 bytes to disk.
    entry->Close();
    ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  }

  entry->Close();
  // After all that churn no internal buffer memory may remain accounted.
  EXPECT_EQ(0, cache_impl()->GetTotalBuffersSize());
}
| 1553 |
| 1554 // This test assumes at least 150MB of system memory. |
| 1555 TEST_P(DiskCacheBlockfileBackendTest, TotalBuffersSize2) { |
| 1556 InitCache(); |
| 1557 |
| 1558 const int kOneMB = 1024 * 1024; |
| 1559 EXPECT_TRUE(cache_impl()->IsAllocAllowed(0, kOneMB)); |
| 1560 EXPECT_EQ(kOneMB, cache_impl()->GetTotalBuffersSize()); |
| 1561 |
| 1562 EXPECT_TRUE(cache_impl()->IsAllocAllowed(0, kOneMB)); |
| 1563 EXPECT_EQ(kOneMB * 2, cache_impl()->GetTotalBuffersSize()); |
| 1564 |
| 1565 EXPECT_TRUE(cache_impl()->IsAllocAllowed(0, kOneMB)); |
| 1566 EXPECT_EQ(kOneMB * 3, cache_impl()->GetTotalBuffersSize()); |
| 1567 |
| 1568 cache_impl()->BufferDeleted(kOneMB); |
| 1569 EXPECT_EQ(kOneMB * 2, cache_impl()->GetTotalBuffersSize()); |
| 1570 |
| 1571 // Check the upper limit. |
| 1572 EXPECT_FALSE(cache_impl()->IsAllocAllowed(0, 30 * kOneMB)); |
| 1573 |
| 1574 for (int i = 0; i < 30; i++) |
| 1575 cache_impl()->IsAllocAllowed(0, kOneMB); // Ignore the result. |
| 1576 |
| 1577 EXPECT_FALSE(cache_impl()->IsAllocAllowed(0, kOneMB)); |
| 1578 } |
| 1579 |
// Tests that sharing of external files works and we are able to delete the
// files when we need to.
TEST_P(DiskCacheBlockfileBackendTest, FileSharing) {
  InitCache();

  // Create an external file through the backend, then open it independently.
  disk_cache::Addr address(0x80000001);
  ASSERT_TRUE(cache_impl()->CreateExternalFile(&address));
  base::FilePath name = cache_impl()->GetFileName(address);

  scoped_refptr<disk_cache::File> file(new disk_cache::File(false));
  file->Init(name);

#if defined(OS_WIN)
  // Without FILE_SHARE_DELETE the open must fail — presumably the cache holds
  // the file open with delete sharing (TODO confirm against File::Init).
  DWORD sharing = FILE_SHARE_READ | FILE_SHARE_WRITE;
  DWORD access = GENERIC_READ | GENERIC_WRITE;
  base::win::ScopedHandle file2(CreateFile(
      name.value().c_str(), access, sharing, NULL, OPEN_EXISTING, 0, NULL));
  EXPECT_FALSE(file2.IsValid());

  sharing |= FILE_SHARE_DELETE;
  file2.Set(CreateFile(name.value().c_str(), access, sharing, NULL,
                       OPEN_EXISTING, 0, NULL));
  EXPECT_TRUE(file2.IsValid());
#endif

  EXPECT_TRUE(base::DeleteFile(name, false));

  // We should be able to use the file even after deleting its name.
  const int kSize = 200;
  char buffer1[kSize];
  char buffer2[kSize];
  memset(buffer1, 't', kSize);
  memset(buffer2, 0, kSize);
  EXPECT_TRUE(file->Write(buffer1, kSize, 0));
  EXPECT_TRUE(file->Read(buffer2, kSize, 0));
  EXPECT_EQ(0, memcmp(buffer1, buffer2, kSize));

  EXPECT_TRUE(disk_cache::DeleteCacheFile(name));
}
| 1619 |
| 1620 TEST_P(DiskCacheBlockfileBackendTest, UpdateRankForExternalCacheHit) { |
| 1621 InitCache(); |
| 1622 |
| 1623 disk_cache::Entry* entry; |
| 1624 |
| 1625 for (int i = 0; i < 2; ++i) { |
| 1626 std::string key = base::StringPrintf("key%d", i); |
| 1627 ASSERT_EQ(net::OK, CreateEntry(key, &entry)); |
| 1628 entry->Close(); |
| 1629 } |
| 1630 |
| 1631 // Ping the oldest entry. |
| 1632 cache()->OnExternalCacheHit("key0"); |
| 1633 |
| 1634 TrimForTest(false); |
| 1635 |
| 1636 // Make sure the older key remains. |
| 1637 EXPECT_EQ(1, cache()->GetEntryCount()); |
| 1638 ASSERT_EQ(net::OK, OpenEntry("key0", &entry)); |
| 1639 entry->Close(); |
| 1640 } |
| 1641 |
| 1642 } // namespace |
| 1643 |
| 1644 } // namespace disk_cache |
OLD | NEW |