Index: net/disk_cache/v2/blockfile_unittest.cc |
diff --git a/net/disk_cache/v2/blockfile_unittest.cc b/net/disk_cache/v2/blockfile_unittest.cc |
new file mode 100644 |
index 0000000000000000000000000000000000000000..56c7fc995dfa0ea384c88b1275757a87489baf34 |
--- /dev/null |
+++ b/net/disk_cache/v2/blockfile_unittest.cc |
@@ -0,0 +1,1644 @@ |
+// Copyright (c) 2014 The Chromium Authors. All rights reserved. |
+// Use of this source code is governed by a BSD-style license that can be |
+// found in the LICENSE file. |
+ |
#include <algorithm>

#include "base/file_util.h"
#include "base/memory/scoped_ptr.h"
#include "base/single_thread_task_runner.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/threading/thread_restrictions.h"
#include "net/base/test_completion_callback.h"
#include "net/disk_cache/backend_tests.h"
#include "net/disk_cache/blockfile/backend_impl.h"
#include "net/disk_cache/blockfile/entry_impl.h"
#include "net/disk_cache/cache_util.h"
#include "net/disk_cache/disk_cache_test.h"
#include "net/disk_cache/disk_cache_test_util.h"
#include "net/disk_cache/entry_sync_tests.h"
#include "net/disk_cache/entry_tests.h"
+ |
+#define CACHE_HISTOGRAM_MACROS_BACKEND_IMPL_OBJ backend_ |
+#include "net/disk_cache/blockfile/histogram_macros.h" |
+ |
+namespace disk_cache { |
+ |
+namespace { |
+ |
// Extra creation data handed to the backend factory for blockfile caches.
// Currently carries only an optional hash table mask; only the backend test
// fixture may construct it.
class BlockfileCacheCreateBackendExtraData
    : public BackendTestTraits::CreateBackendExtraData {
 public:
  // Hash table mask forwarded to BackendImpl; 0 means "use the default".
  uint32 mask() const { return mask_; }
  void set_mask(uint32 mask) { mask_ = mask; }

 private:
  friend class DiskCacheBlockfileBackendTest;

  BlockfileCacheCreateBackendExtraData() : mask_(0) {}

  uint32 mask_;
};
+ |
+class BlockfileCacheBackendTraits : public BackendTestTraits { |
+ public: |
+ virtual ~BlockfileCacheBackendTraits() {} |
+ |
+ virtual Backend* CreateBackend( |
+ const CreateBackendExtraData* extra_data, |
+ const base::FilePath& cache_path, |
+ int max_size, |
+ base::MessageLoopProxy* task_runner) const OVERRIDE { |
+ const BlockfileCacheCreateBackendExtraData* blockfile_extra_data = |
+ static_cast<const BlockfileCacheCreateBackendExtraData*>(extra_data); |
+ |
+ scoped_ptr<BackendImpl> blockfile_backend; |
+ if (blockfile_extra_data && blockfile_extra_data->mask()) { |
+ blockfile_backend.reset( |
+ new BackendImpl(cache_path, blockfile_extra_data->mask(), |
+ task_runner, NULL)); |
+ } else { |
+ blockfile_backend.reset(new BackendImpl(cache_path, task_runner, NULL)); |
+ } |
+ if (!blockfile_backend) |
+ return NULL; |
+ |
+ blockfile_backend->SetMaxSize(max_size); |
+ if (flags_ & kNewEviction) |
+ blockfile_backend->SetNewEviction(); |
+ blockfile_backend->SetFlags(flags_); |
+ blockfile_backend->SetType(type_); |
+ |
+ net::TestCompletionCallback cb; |
+ int result = blockfile_backend->Init(cb.callback()); |
+ if (cb.GetResult(result) != net::OK) { |
+ LOG(INFO) << "init failed :-("; |
+ return NULL; |
+ } |
+ return blockfile_backend.release(); |
+ } |
+ |
+ virtual bool UsesCacheThread() const OVERRIDE { return true; } |
+ virtual bool WritesUpdateLastUsed() const OVERRIDE { |
+ return type_ != net::APP_CACHE; |
+ } |
+ virtual bool ReadsUpdateLastUsed() const OVERRIDE { |
+ return type_ != net::APP_CACHE && type_ != net::SHADER_CACHE; |
+ } |
+ virtual int SparseRoundingInterval() const OVERRIDE { return 1024; } |
+ virtual bool EntryCountIncludesSparseRanges() const OVERRIDE { |
+ return true; |
+ } |
+ virtual bool EnumerationsAreLexicographicByKey() const OVERRIDE { |
+ return true; |
+ } |
+ |
+ virtual bool SetMaxSize(Backend* backend, int size) const OVERRIDE { |
+ return static_cast<BackendImpl*>(backend)->SetMaxSize(size); |
+ } |
+ |
+ virtual void FlushQueueForTest(Backend* backend) const OVERRIDE { |
+ BackendImpl* blockfile_backend = static_cast<BackendImpl*>(backend); |
+ |
+ net::TestCompletionCallback cb; |
+ int rv = blockfile_backend->FlushQueueForTest(cb.callback()); |
+ EXPECT_EQ(net::OK, cb.GetResult(rv)); |
+ } |
+ |
+ net::CacheType type() const { return type_; } |
+ uint32 flags() const { return flags_; } |
+ |
+ // DiskCache(), AppCache() and ShaderCache() return traits objects for the |
+ // entry unit tests. |
+ static const BackendTestTraits* DiskCache() { |
+ static const BlockfileCacheBackendTraits traits(net::DISK_CACHE, kNoRandom); |
+ return &traits; |
+ } |
+ |
+ static const BackendTestTraits* AppCache() { |
+ static const BlockfileCacheBackendTraits traits(net::APP_CACHE, kNoRandom); |
+ return &traits; |
+ } |
+ |
+ static const BackendTestTraits* ShaderCache() { |
+ static const BlockfileCacheBackendTraits traits(net::SHADER_CACHE, |
+ kNoRandom); |
+ return &traits; |
+ } |
+ |
+ // OldEviction() and NewEviction() return traits objects for the backend |
+ // unit tests. |
+ static const BackendTestTraits* OldEviction() { |
+ static const BlockfileCacheBackendTraits traits(net::DISK_CACHE, kNoRandom); |
+ return &traits; |
+ } |
+ |
+ static const BackendTestTraits* NewEviction() { |
+ static const BlockfileCacheBackendTraits traits(net::DISK_CACHE, |
+ kNoRandom | kNewEviction); |
+ return &traits; |
+ } |
+ |
+ private: |
+ BlockfileCacheBackendTraits(net::CacheType type, |
+ uint32 flags) : type_(type), |
+ flags_(flags) {} |
+ |
+ const net::CacheType type_; |
+ const uint32 flags_; |
+}; |
+ |
// Run the shared entry tests (async and sync variants) on DISK_CACHE,
// APP_CACHE and SHADER_CACHE type caches.
INSTANTIATE_TEST_CASE_P(
    BlockfileCache, DiskCacheEntryTest,
    ::testing::Values(BlockfileCacheBackendTraits::DiskCache(),
                      BlockfileCacheBackendTraits::AppCache(),
                      BlockfileCacheBackendTraits::ShaderCache()));

INSTANTIATE_TEST_CASE_P(
    BlockfileCache, DiskCacheEntrySyncTest,
    ::testing::Values(BlockfileCacheBackendTraits::DiskCache(),
                      BlockfileCacheBackendTraits::AppCache(),
                      BlockfileCacheBackendTraits::ShaderCache()));
+ |
// Fixture for entry-level tests that are specific to the blockfile backend
// and need direct access to BackendImpl internals.
class DiskCacheBlockfileEntryTest : public DiskCacheTest {
 protected:
  // Downcasts the generic test cache to the blockfile implementation.
  BackendImpl* cache_impl() {
    return static_cast<BackendImpl*>(DiskCacheTest::cache());
  }
};
+ |
// Blockfile-specific entry tests cover all three cache types.
INSTANTIATE_TEST_CASE_P(
    BlockfileCache, DiskCacheBlockfileEntryTest,
    ::testing::Values(BlockfileCacheBackendTraits::DiskCache(),
                      BlockfileCacheBackendTraits::AppCache(),
                      BlockfileCacheBackendTraits::ShaderCache()));

// Run backend tests using both the old and the new eviction algorithm.
INSTANTIATE_TEST_CASE_P(
    BlockfileCache, DiskCacheBackendTest,
    ::testing::Values(BlockfileCacheBackendTraits::OldEviction(),
                      BlockfileCacheBackendTraits::NewEviction()));
+ |
+ |
// Fixture for backend-level blockfile tests. Provides helpers to set the
// backend's hash table mask, trim entries on the cache thread, and simulate
// a crash followed by a restart.
class DiskCacheBlockfileBackendTest : public DiskCacheTest {
 public:
  // Downcasts the generic test cache to the blockfile implementation.
  BackendImpl* cache_impl() {
    return static_cast<BackendImpl*>(DiskCacheTest::cache());
  }

  const BlockfileCacheBackendTraits* blockfile_traits() const {
    return static_cast<const BlockfileCacheBackendTraits*>(
        DiskCacheTest::traits());
  }

  // Initializes the cache, forwarding |extra_data_| (e.g. the mask set via
  // SetMask()) to the backend factory.
  void InitCache() {
    InitCacheWithExtraData(&extra_data_);
  }

  // Flushes pending work, drops entry ref-counts as if the process died with
  // entries still open, and re-initializes the cache.
  void SimulateCrash() {
    net::TestCompletionCallback cb;
    int rv = cache_impl()->FlushQueueForTest(cb.callback());
    ASSERT_EQ(net::OK, cb.GetResult(rv));
    cache_impl()->ClearRefCountForTest();

    // NOTE(review): the original test destroyed the cache and verified the
    // on-disk structures here, roughly:
    //   cache_.reset();
    //   EXPECT_TRUE(CheckCacheIntegrity(cache_path_, new_eviction_, mask_));
    // Restore that check once the fixture exposes the needed state.

    InitCache();
  }

  void TrimForTest(bool empty) {
    RunTaskForTest(base::Bind(&disk_cache::BackendImpl::TrimForTest,
                              base::Unretained(cache_impl()),
                              empty));
  }

  void TrimDeletedListForTest(bool empty) {
    RunTaskForTest(base::Bind(&disk_cache::BackendImpl::TrimDeletedListForTest,
                              base::Unretained(cache_impl()),
                              empty));
  }

  BlockfileCacheCreateBackendExtraData* extra_data() { return &extra_data_; }

  // Sets the hash table mask used the next time the cache is created; call
  // before InitCache() for it to take effect.
  void SetMask(uint32 mask) {
    extra_data()->set_mask(mask);
  }

 private:
  // Runs |closure| via the backend and waits for its completion.
  void RunTaskForTest(const base::Closure& closure) {
    net::TestCompletionCallback cb;
    int rv = cache_impl()->RunTaskForTest(closure, cb.callback());
    EXPECT_EQ(net::OK, cb.GetResult(rv));
  }

  BlockfileCacheCreateBackendExtraData extra_data_;
};
+ |
// Blockfile-specific backend tests run under both eviction algorithms.
INSTANTIATE_TEST_CASE_P(
    BlockfileCache, DiskCacheBlockfileBackendTest,
    ::testing::Values(BlockfileCacheBackendTraits::OldEviction(),
                      BlockfileCacheBackendTraits::NewEviction()));
+ |
// Tests that we handle the content correctly when buffering, a feature of the
// blockfile cache that permits fast responses to certain reads. The offsets
// below are chosen to cross the thresholds between in-memory buffered data
// and data stored in an external file.
TEST_P(DiskCacheBlockfileEntryTest, BlockfileBuffering) {
  InitCache();

  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 200;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer1->data(), kSize, true);
  CacheTestFillBuffer(buffer2->data(), kSize, true);

  EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer1.get(), kSize, false));
  entry->Close();

  // Write a little more and read what we wrote before.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(kSize, WriteData(entry, 1, 5000, buffer1.get(), kSize, false));
  EXPECT_EQ(kSize, ReadData(entry, 1, 0, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));

  // Now go to an external file.
  EXPECT_EQ(kSize, WriteData(entry, 1, 18000, buffer1.get(), kSize, false));
  entry->Close();

  // Write something else and verify old data. buffer2 is scrambled before
  // every read so a passing memcmp proves the read actually happened.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(kSize, WriteData(entry, 1, 10000, buffer1.get(), kSize, false));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 5000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 0, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 18000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));

  // Extend the file some more.
  EXPECT_EQ(kSize, WriteData(entry, 1, 23000, buffer1.get(), kSize, false));
  entry->Close();

  // And now make sure that we can deal with data in both places (ram/disk).
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(kSize, WriteData(entry, 1, 17000, buffer1.get(), kSize, false));

  // We should not overwrite the data at 18000 with this.
  EXPECT_EQ(kSize, WriteData(entry, 1, 19000, buffer1.get(), kSize, false));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 18000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 17000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));

  EXPECT_EQ(kSize, WriteData(entry, 1, 22900, buffer1.get(), kSize, false));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(100, ReadData(entry, 1, 23000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data() + 100, 100));

  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(100, ReadData(entry, 1, 23100, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data() + 100, 100));

  // Extend the file again and read before without closing the entry.
  EXPECT_EQ(kSize, WriteData(entry, 1, 25000, buffer1.get(), kSize, false));
  EXPECT_EQ(kSize, WriteData(entry, 1, 45000, buffer1.get(), kSize, false));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 25000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 45000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));

  entry->Close();
}
+ |
// Tests that we discard entries if the data is missing: deleting an entry's
// external data file behind the cache's back must make reads fail and the
// entry itself get dropped.
TEST_P(DiskCacheBlockfileEntryTest, MissingData) {
  InitCache();

  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  // Write to an external file (20000 bytes is large enough to leave the
  // block files).
  const int kSize = 20000;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer->data(), kSize, false);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
  entry->Close();
  FlushQueueForTest();

  // Delete the first external file (file number 1) out from under the cache.
  disk_cache::Addr address(0x80000001);
  base::FilePath name = cache_impl()->GetFileName(address);
  EXPECT_TRUE(base::DeleteFile(name, false));

  // Attempt to read the data.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(net::ERR_FILE_NOT_FOUND,
            ReadData(entry, 0, 0, buffer.get(), kSize));
  entry->Close();

  // The entry should be gone.
  ASSERT_NE(net::OK, OpenEntry(key, &entry));
}
+ |
// Tests that corrupt sparse children are removed automatically. Three sparse
// ranges create three hidden child entries; two of them are then corrupted
// on disk and must be discarded when touched.
TEST_P(DiskCacheBlockfileEntryTest, CleanupSparseEntry) {
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 4 * 1024;
  scoped_refptr<net::IOBuffer> buf1(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf1->data(), kSize, false);

  const int k1Meg = 1024 * 1024;
  EXPECT_EQ(kSize, WriteSparseData(entry, 8192, buf1.get(), kSize));
  EXPECT_EQ(kSize, WriteSparseData(entry, k1Meg + 8192, buf1.get(), kSize));
  EXPECT_EQ(kSize, WriteSparseData(entry, 2 * k1Meg + 8192, buf1.get(), kSize));
  entry->Close();
  // Parent entry plus three sparse children.
  EXPECT_EQ(4, cache()->GetEntryCount());

  void* iter = NULL;
  int count = 0;
  std::string child_key[2];
  while (OpenNextEntry(&iter, &entry) == net::OK) {
    ASSERT_TRUE(entry != NULL);
    // Writing to an entry will alter the LRU list and invalidate the iterator.
    if (entry->GetKey() != key && count < 2)
      child_key[count++] = entry->GetKey();
    entry->Close();
  }
  for (int i = 0; i < 2; i++) {
    ASSERT_EQ(net::OK, OpenEntry(child_key[i], &entry));
    // Overwrite the header's magic and signature.
    EXPECT_EQ(12, WriteData(entry, 2, 0, buf1.get(), 12, false));
    entry->Close();
  }

  EXPECT_EQ(4, cache()->GetEntryCount());
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));

  // Two children should be gone. One while reading and one while writing.
  EXPECT_EQ(0, ReadSparseData(entry, 2 * k1Meg + 8192, buf1.get(), kSize));
  EXPECT_EQ(kSize, WriteSparseData(entry, k1Meg + 16384, buf1.get(), kSize));
  EXPECT_EQ(0, ReadSparseData(entry, k1Meg + 8192, buf1.get(), kSize));

  // We never touched this one.
  EXPECT_EQ(kSize, ReadSparseData(entry, 8192, buf1.get(), kSize));
  entry->Close();

  // We re-created one of the corrupt children.
  EXPECT_EQ(3, cache()->GetEntryCount());
}
+ |
// Tests cancellation of in-flight sparse I/O: a pending sparse write is
// cancelled, several ReadyForSparseIO() notifications are registered, and
// all of them must eventually fire.
TEST_P(DiskCacheBlockfileEntryTest, CancelSparseIO) {
  UseCurrentThread();
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 40 * 1024;
  scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf->data(), kSize, false);

  // This will open and write two "real" entries.
  net::TestCompletionCallback cb1, cb2, cb3, cb4, cb5;
  int rv = entry->WriteSparseData(
      1024 * 1024 - 4096, buf.get(), kSize, cb1.callback());
  EXPECT_EQ(net::ERR_IO_PENDING, rv);

  int64 offset = 0;
  rv = entry->GetAvailableRange(offset, kSize, &offset, cb5.callback());
  rv = cb5.GetResult(rv);
  if (!cb1.have_result()) {
    // We may or may not have finished writing to the entry. If we have not,
    // we cannot start another operation at this time.
    EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED, rv);
  }

  // We cancel the pending operation, and register multiple notifications.
  entry->CancelSparseIO();
  EXPECT_EQ(net::ERR_IO_PENDING, entry->ReadyForSparseIO(cb2.callback()));
  EXPECT_EQ(net::ERR_IO_PENDING, entry->ReadyForSparseIO(cb3.callback()));
  entry->CancelSparseIO();  // Should be a no op at this point.
  EXPECT_EQ(net::ERR_IO_PENDING, entry->ReadyForSparseIO(cb4.callback()));

  if (!cb1.have_result()) {
    EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED,
              entry->ReadSparseData(
                  offset, buf.get(), kSize, net::CompletionCallback()));
    EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED,
              entry->WriteSparseData(
                  offset, buf.get(), kSize, net::CompletionCallback()));
  }

  // Now see if we receive all notifications. Note that we should not be able
  // to write everything (unless the timing of the system is really weird).
  rv = cb1.WaitForResult();
  EXPECT_TRUE(rv == 4096 || rv == kSize);
  EXPECT_EQ(net::OK, cb2.WaitForResult());
  EXPECT_EQ(net::OK, cb3.WaitForResult());
  EXPECT_EQ(net::OK, cb4.WaitForResult());

  rv = entry->GetAvailableRange(offset, kSize, &offset, cb5.callback());
  EXPECT_EQ(0, cb5.GetResult(rv));
  entry->Close();
}
+ |
// Tests that we perform sanity checks on an entry's key. Note that there are
// other tests that exercise sanity checks by using saved corrupt files.
TEST_P(DiskCacheBlockfileEntryTest, KeySanityCheck) {
  UseCurrentThread();
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);
  disk_cache::EntryStore* store = entry_impl->entry()->Data();

  // We have reserved space for a short key (one block), let's say that the key
  // takes more than one block, and remove the NULLs after the actual key.
  store->key_len = 800;
  memset(store->key + key.size(), 'k', sizeof(store->key) - key.size());
  entry_impl->entry()->set_modified();
  entry->Close();

  // We have a corrupt entry. Now reload it. We should NOT read beyond the
  // allocated buffer here.
  ASSERT_NE(net::OK, OpenEntry(key, &entry));
  // NOTE(review): the original test disabled the final integrity check here
  // (DisableIntegrityCheck()); restore if this fixture grows that check.
}
+ |
// Backend tests.

// Tests that |BackendImpl| fails to initialize with a missing file, and that
// it does so without touching disk from the calling thread (I/O is forbidden
// via ThreadRestrictions while Init runs).
TEST_P(DiskCacheBlockfileBackendTest, CreateBackend_MissingFile) {
  ASSERT_TRUE(CopyTestCache("bad_entry"));
  base::FilePath filename = cache_path().AppendASCII("data_1");
  base::DeleteFile(filename, false);
  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
  net::TestCompletionCallback cb;

  bool prev = base::ThreadRestrictions::SetIOAllowed(false);
  scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
      cache_path(), cache_thread.message_loop_proxy().get(), NULL));
  int rv = cache->Init(cb.callback());
  EXPECT_EQ(net::ERR_FAILED, cb.GetResult(rv));
  base::ThreadRestrictions::SetIOAllowed(prev);

  cache.reset();
  DisableIntegrityCheck();
}
+ |
// TODO(gavinp): Extend this beyond a single truncated-index case (e.g. other
// sizes and corruption patterns).
// Tests that backend creation fails cleanly when the index file is truncated
// garbage.
TEST_P(DiskCacheBlockfileBackendTest, TruncatedIndex) {
  base::FilePath index = cache_path().AppendASCII("index");
  ASSERT_EQ(5, file_util::WriteFile(index, "hello", 5));

  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
  net::TestCompletionCallback cb;

  scoped_ptr<disk_cache::Backend> backend;
  int rv =
      disk_cache::CreateCacheBackend(net::DISK_CACHE,
                                     net::CACHE_BACKEND_BLOCKFILE,
                                     cache_path(),
                                     0,
                                     false,
                                     cache_thread.message_loop_proxy().get(),
                                     NULL,
                                     &backend,
                                     cb.callback());
  ASSERT_NE(net::OK, cb.GetResult(rv));

  ASSERT_FALSE(backend);
}
+ |
// Tests the chaining of an entry to the current head: with a 2-entry hash
// table, the second key lands on an occupied bucket and must be chained.
TEST_P(DiskCacheBlockfileBackendTest, Chain) {
  SetMask(0x1);  // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  InitCache();

  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry("The first key", &entry));
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry("The Second key", &entry));
  entry->Close();
}
+ |
// Tests the two-list trim order of the new eviction algorithm: re-opened
// entries live on list 1, never-reused ones on list 0, and trimming evicts
// from list 1 first (up to its 10% quota) and then from list 0.
TEST_P(DiskCacheBlockfileBackendTest, NewEvictionTrim) {
  // Only meaningful for the new-eviction parameterization.
  TEST_DISABLED_IF(
      (blockfile_traits()->flags() & kNewEviction) != kNewEviction);
  InitCache();

  disk_cache::Entry* entry;
  for (int i = 0; i < 100; i++) {
    std::string name(base::StringPrintf("Key %d", i));
    ASSERT_EQ(net::OK, CreateEntry(name, &entry));
    entry->Close();
    if (i < 90) {
      // Entries 0 to 89 are in list 1; 90 to 99 are in list 0.
      ASSERT_EQ(net::OK, OpenEntry(name, &entry));
      entry->Close();
    }
  }

  // The first eviction must come from list 1 (10% limit), the second must come
  // from list 0.
  TrimForTest(false);
  EXPECT_NE(net::OK, OpenEntry("Key 0", &entry));
  TrimForTest(false);
  EXPECT_NE(net::OK, OpenEntry("Key 90", &entry));

  // Double check that we still have the list tails.
  ASSERT_EQ(net::OK, OpenEntry("Key 1", &entry));
  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry("Key 91", &entry));
  entry->Close();
}
+ |
// Tests that the cache does not clobber unrelated files in its directory
// whose names collide with the external-file naming scheme (f_xxxxxx).
TEST_P(DiskCacheBlockfileBackendTest, ExternalFiles) {
  InitCache();
  // First, let's create a file on the folder.
  base::FilePath filename = cache_path().AppendASCII("f_000001");

  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer1->data(), kSize, false);
  ASSERT_EQ(kSize, file_util::WriteFile(filename, buffer1->data(), kSize));

  // Now let's create a file with the cache.
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry("key", &entry));
  ASSERT_EQ(0, WriteData(entry, 0, 20000, buffer1.get(), 0, false));
  entry->Close();

  // And verify that the first file is still there.
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
  ASSERT_EQ(kSize, base::ReadFile(filename, buffer2->data(), kSize));
  EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kSize));
}
+ |
// Before looking for invalid entries, let's check a valid entry: a cleanly
// closed entry must survive SimulateCrash() with its data intact.
TEST_P(DiskCacheBlockfileBackendTest, ValidEntry) {
  InitCache();

  std::string key("Some key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  memset(buffer1->data(), 0, kSize);
  base::strlcpy(buffer1->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer1.get(), kSize, false));
  entry->Close();
  SimulateCrash();

  ASSERT_EQ(net::OK, OpenEntry(key, &entry));

  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
  memset(buffer2->data(), 0, kSize);
  EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer2.get(), kSize));
  entry->Close();
  EXPECT_STREQ(buffer1->data(), buffer2->data());
}
+ |
// The same logic of the previous test (ValidEntry), but this time force the
// entry to be invalid, simulating a crash in the middle: the entry is never
// closed before the crash, so it must be discarded as dirty.
// We'll be leaking memory from this test.
TEST_P(DiskCacheBlockfileBackendTest, InvalidEntry) {
  InitCache();

  std::string key("Some key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  memset(buffer->data(), 0, kSize);
  base::strlcpy(buffer->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
  SimulateCrash();  // Note: |entry| was intentionally left open.

  EXPECT_NE(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(0, cache()->GetEntryCount());
}
+ |
// Almost the same test, but this time crash the cache after reading an entry.
// APP_CACHE backends treat a read-only open as clean; others discard it.
// We'll be leaking memory from this test.
TEST_P(DiskCacheBlockfileBackendTest, InvalidEntryRead) {
  InitCache();

  std::string key("Some key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  memset(buffer->data(), 0, kSize);
  base::strlcpy(buffer->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer.get(), kSize));

  SimulateCrash();  // Note: |entry| was intentionally left open.

  if (cache_impl()->cache_type() == net::APP_CACHE) {
    // Reading an entry and crashing should not make it dirty.
    ASSERT_EQ(net::OK, OpenEntry(key, &entry));
    EXPECT_EQ(1, cache()->GetEntryCount());
    entry->Close();
  } else {
    EXPECT_NE(net::OK, OpenEntry(key, &entry));
    EXPECT_EQ(0, cache()->GetEntryCount());
  }
}
+ |
+// We'll be leaking memory from this test. |
+TEST_P(DiskCacheBlockfileBackendTest, InvalidEntryWithLoad) { |
+ // Work with a tiny index table (16 entries) |
+ SetMask(0x1); // 2-entry table. |
+ SetMaxSize(0x100000); |
+ InitCache(); |
+ |
+ int seed = static_cast<int>(base::Time::Now().ToInternalValue()); |
+ srand(seed); |
+ |
+ const int kNumEntries = 100; |
+ disk_cache::Entry* entries[kNumEntries]; |
+ for (int i = 0; i < kNumEntries; i++) { |
+ std::string key = GenerateKey(true); |
+ ASSERT_EQ(net::OK, CreateEntry(key, &entries[i])); |
+ } |
+ EXPECT_EQ(kNumEntries, cache()->GetEntryCount()); |
+ |
+ for (int i = 0; i < kNumEntries; i++) { |
+ int source1 = rand() % kNumEntries; |
+ int source2 = rand() % kNumEntries; |
+ disk_cache::Entry* temp = entries[source1]; |
+ entries[source1] = entries[source2]; |
+ entries[source2] = temp; |
+ } |
+ |
+ std::string keys[kNumEntries]; |
+ for (int i = 0; i < kNumEntries; i++) { |
+ keys[i] = entries[i]->GetKey(); |
+ if (i < kNumEntries / 2) |
+ entries[i]->Close(); |
+ } |
+ |
+ SimulateCrash(); |
+ |
+ for (int i = kNumEntries / 2; i < kNumEntries; i++) { |
+ disk_cache::Entry* entry; |
+ EXPECT_NE(net::OK, OpenEntry(keys[i], &entry)); |
+ } |
+ |
+ for (int i = 0; i < kNumEntries / 2; i++) { |
+ disk_cache::Entry* entry; |
+ ASSERT_EQ(net::OK, OpenEntry(keys[i], &entry)); |
+ entry->Close(); |
+ } |
+ |
+ EXPECT_EQ(kNumEntries / 2, cache()->GetEntryCount()); |
+} |
+ |
// Tests that an entry left dirty by a crash is the first to be trimmed once
// the cache is shrunk below its current usage.
TEST_P(DiskCacheBlockfileBackendTest, TrimInvalidEntry) {
  const int kSize = 0x3000;  // 12 kB
  SetMaxSize(kSize * 10);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(first, &entry));

  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  memset(buffer->data(), 0, kSize);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));

  // Simulate a crash.
  SimulateCrash();

  ASSERT_EQ(net::OK, CreateEntry(second, &entry));
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));

  EXPECT_EQ(2, cache()->GetEntryCount());
  SetMaxSize(kSize);
  entry->Close();  // Trim the cache.
  FlushQueueForTest();

  // If we evicted the entry in less than 20mS, we have one entry in the cache;
  // if it took more than that, we posted a task and we'll delete the second
  // entry too.
  base::MessageLoop::current()->RunUntilIdle();

  // This may be not thread-safe in general, but for now it's OK so add some
  // ThreadSanitizer annotations to ignore data races on cache_.
  // See http://crbug.com/55970
  ANNOTATE_IGNORE_READS_BEGIN();
  EXPECT_GE(1, cache()->GetEntryCount());
  ANNOTATE_IGNORE_READS_END();

  EXPECT_NE(net::OK, OpenEntry(first, &entry));
}
+ |
// Like TrimInvalidEntry, but with many chained dirty entries so eviction has
// to walk past corrupt records.
// We'll be leaking memory from this test.
TEST_P(DiskCacheBlockfileBackendTest, TrimInvalidEntry2) {
  SetMask(0xf);  // 16-entry table.

  const int kSize = 0x3000;  // 12 kB
  SetMaxSize(kSize * 40);
  InitCache();

  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  memset(buffer->data(), 0, kSize);
  disk_cache::Entry* entry;

  // Writing 32 entries to this cache chains most of them.
  for (int i = 0; i < 32; i++) {
    std::string key(base::StringPrintf("some key %d", i));
    ASSERT_EQ(net::OK, CreateEntry(key, &entry));
    EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
    entry->Close();
    ASSERT_EQ(net::OK, OpenEntry(key, &entry));
    // Note that we are not closing the entries.
  }

  // Simulate a crash.
  SimulateCrash();

  ASSERT_EQ(net::OK, CreateEntry("Something else", &entry));
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));

  FlushQueueForTest();
  EXPECT_EQ(33, cache()->GetEntryCount());
  SetMaxSize(kSize);

  // For the new eviction code, all corrupt entries are on the second list so
  // they are not going away that easy.
  // NOTE(review): this used to run only under new eviction
  // (if (new_eviction_)); it now runs for both parameterizations.
  EXPECT_EQ(net::OK, DoomAllEntries());

  entry->Close();  // Trim the cache.
  FlushQueueForTest();

  // We may abort the eviction before cleaning up everything.
  base::MessageLoop::current()->RunUntilIdle();
  FlushQueueForTest();
  // If it's not clear enough: we may still have eviction tasks running at this
  // time, so the number of entries is changing while we read it.
  ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
  EXPECT_GE(30, cache()->GetEntryCount());
  ANNOTATE_IGNORE_READS_AND_WRITES_END();
}
+ |
// Loads the saved cache |name| (captured mid-operation) and verifies crash
// recovery: the interrupted entry ("the first key") must not be openable and
// roughly |num_entries| entries must remain. With |load| set, a small table
// (mask 0xf) is used to exercise heavily chained buckets.
void BackendTransaction(DiskCacheBlockfileBackendTest* test,
                        const std::string& name,
                        int num_entries, bool load) {
  // NOTE(review): the original version set success_ = false and called
  // DisableFirstCleanup() here; this port relies on InitCache() alone.
  ASSERT_TRUE(test->CopyTestCache(name));

  uint32 mask;
  if (load) {
    mask = 0xf;
    test->SetMaxSize(0x100000);
  } else {
    // Clear the settings from the previous run.
    mask = 0;
    test->SetMaxSize(0);
  }
  test->SetMask(mask);

  test->InitCache();
  // The interrupted entry is still counted right after init.
  ASSERT_EQ(num_entries + 1, test->cache()->GetEntryCount());

  std::string key("the first key");
  disk_cache::Entry* entry1;
  ASSERT_NE(net::OK, test->OpenEntry(key, &entry1));

  int actual = test->cache()->GetEntryCount();
  if (num_entries != actual) {
    ASSERT_TRUE(load);
    // If there is a heavy load, inserting an entry will make another entry
    // dirty (on the hash bucket) so two entries are removed.
    ASSERT_EQ(num_entries - 1, actual);
  }

  // NOTE(review): the original version also reset the cache and verified
  // CheckCacheIntegrity(cache_path_, new_eviction_, mask) before setting
  // success_ = true; restore once the fixture exposes that state.
}
+ |
// Runs the transaction-recovery check against caches saved while an insert
// was in progress.
TEST_P(DiskCacheBlockfileBackendTest, RecoverInsert) {
  // Tests with an empty cache.
  ASSERT_NO_FATAL_FAILURE(BackendTransaction(this, "insert_empty1", 0, false));
  ASSERT_NO_FATAL_FAILURE(BackendTransaction(this, "insert_empty2", 0, false));
  ASSERT_NO_FATAL_FAILURE(BackendTransaction(this, "insert_empty3", 0, false));

  // Tests with one entry on the cache.
  ASSERT_NO_FATAL_FAILURE(BackendTransaction(this, "insert_one1", 1, false));
  ASSERT_NO_FATAL_FAILURE(BackendTransaction(this, "insert_one2", 1, false));
  ASSERT_NO_FATAL_FAILURE(BackendTransaction(this, "insert_one3", 1, false));

  // Tests with one hundred entries on the cache, tiny index.
  ASSERT_NO_FATAL_FAILURE(BackendTransaction(this, "insert_load1", 100, true));
  ASSERT_NO_FATAL_FAILURE(BackendTransaction(this, "insert_load2", 100, true));
}
+ |
// Runs the transaction-recovery check against caches saved while a remove
// was in progress.
TEST_P(DiskCacheBlockfileBackendTest, RecoverRemove) {
  // Removing the only element.
  ASSERT_NO_FATAL_FAILURE(BackendTransaction(this, "remove_one1", 0, false));
  ASSERT_NO_FATAL_FAILURE(BackendTransaction(this, "remove_one2", 0, false));
  ASSERT_NO_FATAL_FAILURE(BackendTransaction(this, "remove_one3", 0, false));

  // Removing the head.
  ASSERT_NO_FATAL_FAILURE(BackendTransaction(this, "remove_head1", 1, false));
  ASSERT_NO_FATAL_FAILURE(BackendTransaction(this, "remove_head2", 1, false));
  ASSERT_NO_FATAL_FAILURE(BackendTransaction(this, "remove_head3", 1, false));

  // Removing the tail.
  ASSERT_NO_FATAL_FAILURE(BackendTransaction(this, "remove_tail1", 1, false));
  ASSERT_NO_FATAL_FAILURE(BackendTransaction(this, "remove_tail2", 1, false));
  ASSERT_NO_FATAL_FAILURE(BackendTransaction(this, "remove_tail3", 1, false));

  // Removing with one hundred entries on the cache, tiny index.
  ASSERT_NO_FATAL_FAILURE(BackendTransaction(this, "remove_load1", 100, true));
  ASSERT_NO_FATAL_FAILURE(BackendTransaction(this, "remove_load2", 100, true));
  ASSERT_NO_FATAL_FAILURE(BackendTransaction(this, "remove_load3", 100, true));

  // This case cannot be reverted.
  ASSERT_NO_FATAL_FAILURE(BackendTransaction(this, "remove_one4", 0, false));
  ASSERT_NO_FATAL_FAILURE(BackendTransaction(this, "remove_head4", 1, false));
}
+ |
// Recovery combined with eviction: restoring "insert_load1" with a tiny
// 4 kB max size forces eviction during initialization.
TEST_P(DiskCacheBlockfileBackendTest, RecoverWithEviction) {
  ASSERT_TRUE(CopyTestCache("insert_load1"));
  //DisableFirstCleanup();

  SetMask(0xf);
  SetMaxSize(0x1000);

  // We should not crash here.
  InitCache();
  // Evicting over a recovered cache may leave the index inconsistent, so
  // skip the teardown integrity check.
  DisableIntegrityCheck();
}
+ |
// Tests that the |BackendImpl| fails to start with the wrong cache version.
TEST_P(DiskCacheBlockfileBackendTest, WrongVersion) {
  ASSERT_TRUE(CopyTestCache("wrong_version"));
  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
  net::TestCompletionCallback cb;

  // Build the backend by hand (instead of InitCache()) so the init failure
  // is observable rather than fatal to the fixture.
  scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
      cache_path(), cache_thread.message_loop_proxy().get(), NULL));
  int rv = cache->Init(cb.callback());
  ASSERT_EQ(net::ERR_FAILED, cb.GetResult(rv));
}
+ |
#if 0
// TODO(review): disabled pending a v2 way to hand ownership of the recreated
// backend back to the fixture (note the cache() output argument below) and a
// CheckCacheIntegrity equivalent. Re-enable once both exist.
// Tests that the cache is properly restarted on recovery error.
TEST_P(DiskCacheBlockfileBackendTest, DeleteOld) {
  ASSERT_TRUE(CopyTestCache("wrong_version"));
  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));

  net::TestCompletionCallback cb;
  // Forbid I/O on this thread to prove the work happens on |cache_thread|.
  bool prev = base::ThreadRestrictions::SetIOAllowed(false);
  base::FilePath path(cache_path());
  int rv =
      disk_cache::CreateCacheBackend(net::DISK_CACHE,
                                     net::CACHE_BACKEND_BLOCKFILE,
                                     path,
                                     0,
                                     true,
                                     cache_thread.message_loop_proxy().get(),
                                     NULL,
                                     cache(),
                                     cb.callback());
  path.clear();  // Make sure path was captured by the previous call.
  ASSERT_EQ(net::OK, cb.GetResult(rv));
  base::ThreadRestrictions::SetIOAllowed(prev);
  // cache_.reset();
  //EXPECT_TRUE(CheckCacheIntegrity(cache_path(), new_eviction_, mask_));
}
#endif
+ |
// We want to be able to deal with messed up entries on disk: the corrupt
// entry must fail to open without taking down the healthy one.
TEST_P(DiskCacheBlockfileBackendTest, InvalidEntry2) {
  ASSERT_TRUE(CopyTestCache("bad_entry"));
  //DisableFirstCleanup();
  InitCache();

  disk_cache::Entry *entry1, *entry2;
  ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1));
  EXPECT_NE(net::OK, OpenEntry("some other key", &entry2));
  entry1->Close();

  // CheckCacheIntegrity will fail at this point.
  //DisableIntegrityCheck();
}
+ |
+// Tests that we don't crash or hang when enumerating this cache. |
+TEST_P(DiskCacheBlockfileBackendTest, InvalidEntry3) { |
+ SetMask(0x1); // 2-entry table. |
+ SetMaxSize(0x3000); // 12 kB. |
+ InitCache(); |
+ |
+ disk_cache::Entry* entry; |
+ void* iter = NULL; |
+ while (OpenNextEntry(&iter, &entry) == net::OK) { |
+ entry->Close(); |
+ } |
+} |
+ |
// Test that we handle a dirty entry on the LRU list, already replaced with
// the same key, and with hash collisions.
TEST_P(DiskCacheBlockfileBackendTest, InvalidEntry4) {
  ASSERT_TRUE(CopyTestCache("dirty_entry3"));
  SetMask(0x1);  // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  //DisableFirstCleanup();
  InitCache();

  // Eviction must cope with the dirty/duplicate node without crashing.
  TrimForTest(false);
}
+ |
// Test that we handle a dirty entry on the deleted list, already replaced with
// the same key, and with hash collisions.
// DISABLED: needs the "new eviction" mode (SetNewEviction() is commented out
// below), which this fixture does not support yet.
TEST_P(DiskCacheBlockfileBackendTest, DISABLED_InvalidEntry5) {
  ASSERT_TRUE(CopyTestCache("dirty_entry4"));
  // SetNewEviction();
  SetMask(0x1);  // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  //DisableFirstCleanup();
  InitCache();

  TrimDeletedListForTest(false);
}
+ |
// Eviction must not resurrect a deleted entry that is only reachable through
// the hash-collision chain of a dirty entry.
TEST_P(DiskCacheBlockfileBackendTest, InvalidEntry6) {
  ASSERT_TRUE(CopyTestCache("dirty_entry5"));
  SetMask(0x1);  // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  InitCache();

  // There is a dirty entry (but marked as clean) at the end, pointing to a
  // deleted entry through the hash collision list. We should not re-insert the
  // deleted entry into the index table.

  TrimForTest(false);
  // The cache should be clean (as detected by CheckCacheIntegrity).
}
+ |
// Tests that we don't hang when there is a loop on the hash collision list.
// The test cache could be a result of bug 69135.
TEST_P(DiskCacheBlockfileBackendTest, BadNextEntry1) {
  ASSERT_TRUE(CopyTestCache("list_loop2"));
  SetMask(0x1);  // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  InitCache();

  // The second entry points at itself, and the first entry is not accessible
  // through the index, but it is at the head of the LRU.

  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry("The first key", &entry));
  entry->Close();

  // Trim twice; traversing the looped list must terminate both times.
  TrimForTest(false);
  TrimForTest(false);
  // The entry created above must survive the trims.
  ASSERT_EQ(net::OK, OpenEntry("The first key", &entry));
  entry->Close();
  EXPECT_EQ(1, cache()->GetEntryCount());
}
+ |
// Tests that we don't hang when there is a loop on the hash collision list.
// The test cache could be a result of bug 69135.
TEST_P(DiskCacheBlockfileBackendTest, BadNextEntry2) {
  ASSERT_TRUE(CopyTestCache("list_loop3"));
  SetMask(0x1);  // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  InitCache();

  // There is a wide loop of 5 entries.

  disk_cache::Entry* entry;
  // A miss walks the full collision chain; the loop must not hang it.
  ASSERT_NE(net::OK, OpenEntry("Not present key", &entry));
}
+ |
// DISABLED: this scenario needs the "new eviction" mode (SetNewEviction() is
// commented out below), which this fixture does not support yet.
TEST_P(DiskCacheBlockfileBackendTest, DISABLED_NewEvictionInvalidEntry6) {
  ASSERT_TRUE(CopyTestCache("bad_rankings3"));
  //DisableFirstCleanup();
  //SetNewEviction();
  InitCache();

  // The second entry is dirty, but removing it should not corrupt the list.
  disk_cache::Entry* entry;
  ASSERT_NE(net::OK, OpenEntry("the second key", &entry));
  ASSERT_EQ(net::OK, OpenEntry("the first key", &entry));

  // This should not delete the cache.
  entry->Doom();
  FlushQueueForTest();
  entry->Close();

  ASSERT_EQ(net::OK, OpenEntry("some other key", &entry));
  entry->Close();
}
+ |
// Tests handling of corrupt entries by keeping the rankings node around, with
// a fatal failure.
TEST_P(DiskCacheBlockfileBackendTest, InvalidEntry7) {
  const int kSize = 0x3000;  // 12 kB.
  SetMaxSize(kSize * 10);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(first, &entry));
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry(second, &entry));

  // Corrupt this entry: zero its rankings "next" pointer, breaking the LRU
  // list itself (the fatal flavor of corruption, per this test's outcome).
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);

  entry_impl->rankings()->Data()->next = 0;
  entry_impl->rankings()->Store();
  entry->Close();
  FlushQueueForTest();
  EXPECT_EQ(2, cache()->GetEntryCount());

  // This should detect the bad entry.
  EXPECT_NE(net::OK, OpenEntry(second, &entry));
  EXPECT_EQ(1, cache()->GetEntryCount());

  // We should delete the cache. The list still has a corrupt node.
  void* iter = NULL;
  EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
  FlushQueueForTest();
  EXPECT_EQ(0, cache()->GetEntryCount());
}
+ |
// Tests handling of corrupt entries by keeping the rankings node around, with
// a non fatal failure.
TEST_P(DiskCacheBlockfileBackendTest, InvalidEntry8) {
  const int kSize = 0x3000;  // 12 kB
  SetMaxSize(kSize * 10);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(first, &entry));
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry(second, &entry));

  // Corrupt this entry: clear its rankings "contents" pointer (the non-fatal
  // flavor of corruption, per this test's outcome -- the cache survives).
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);

  entry_impl->rankings()->Data()->contents = 0;
  entry_impl->rankings()->Store();
  entry->Close();
  FlushQueueForTest();
  EXPECT_EQ(2, cache()->GetEntryCount());

  // This should detect the bad entry.
  EXPECT_NE(net::OK, OpenEntry(second, &entry));
  EXPECT_EQ(1, cache()->GetEntryCount());

  // We should not delete the cache.
  void* iter = NULL;
  ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
  entry->Close();
  EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
  EXPECT_EQ(1, cache()->GetEntryCount());
}
+ |
#if 0
// TODO(review): disabled; still written against the old DiskCacheBackendTest
// fixture (and uses SetNewEviction(), not available here). Port to
// DiskCacheBlockfileBackendTest before re-enabling.
// Tests handling of corrupt entries detected by enumerations.
void DiskCacheBackendTest::BackendInvalidEntry10(bool eviction) {
  const int kSize = 0x3000;  // 12 kB.
  SetMaxSize(kSize * 10);
  SetNewEviction();
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(first, &entry));
  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry(first, &entry));
  EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry(second, &entry));

  // Corrupt this entry.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);

  entry_impl->entry()->Data()->state = 0xbad;
  entry_impl->entry()->Store();
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry("third", &entry));
  entry->Close();
  EXPECT_EQ(3, cache()->GetEntryCount());

  // We have:
  // List 0: third -> second (bad).
  // List 1: first.

  if (eviction) {
    // Detection order: second -> first -> third.
    TrimForTest(false);
    EXPECT_EQ(3, cache()->GetEntryCount());
    TrimForTest(false);
    EXPECT_EQ(2, cache()->GetEntryCount());
    TrimForTest(false);
    EXPECT_EQ(1, cache()->GetEntryCount());
  } else {
    // Detection order: third -> second -> first.
    // We should detect the problem through the list, but we should not delete
    // the entry.
    void* iter = NULL;
    ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
    entry->Close();
    ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
    EXPECT_EQ(first, entry->GetKey());
    entry->Close();
    EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
  }
  DisableIntegrityCheck();
}
#endif
+ |
// Tests handling of corrupt entries in the middle of a long eviction run.
TEST_P(DiskCacheBlockfileBackendTest, TrimInvalidEntry12) {
  const int kSize = 0x3000;  // 12 kB
  SetMaxSize(kSize * 10);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(first, &entry));
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry(second, &entry));

  // Corrupt this entry by marking its on-disk state invalid.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);

  entry_impl->entry()->Data()->state = 0xbad;
  entry_impl->entry()->Store();
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry("third", &entry));
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry("fourth", &entry));
  // Empty the cache; only the still-open "fourth" entry survives the run.
  TrimForTest(true);
  EXPECT_EQ(1, cache()->GetEntryCount());
  entry->Close();
  DisableIntegrityCheck();
}
+ |
#if 0
// TODO(review): disabled; written against the old DiskCacheBackendTest
// fixture and expects a pre-corrupted cache set up by its callers. Port to
// DiskCacheBlockfileBackendTest before re-enabling.
// If the LRU is corrupt, we delete the cache.
void DiskCacheBackendTest::BackendInvalidRankings() {
  disk_cache::Entry* entry;
  void* iter = NULL;
  ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
  entry->Close();
  EXPECT_EQ(2, cache()->GetEntryCount());

  EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
  FlushQueueForTest();  // Allow the restart to finish.
  EXPECT_EQ(0, cache()->GetEntryCount());
}
#endif
+ |
// We want to be able to deal with messed up entries on disk.
TEST_P(DiskCacheBlockfileBackendTest, InvalidRankings2) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  InitCache();

  disk_cache::Entry *entry1, *entry2;
  // The entry with the broken rankings data is rejected; the other one still
  // opens.
  EXPECT_NE(net::OK, OpenEntry("the first key", &entry1));
  ASSERT_EQ(net::OK, OpenEntry("some other key", &entry2));
  entry2->Close();

  // CheckCacheIntegrity will fail at this point.
  DisableIntegrityCheck();
}
+ |
// If the index size changes when we disable the cache, we should not crash.
// NOTE(review): the test name has a typo ("Sucess"); renaming would change
// the gtest filter string, so it is only flagged here.
TEST_P(DiskCacheBlockfileBackendTest, DisableSucess3) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  InitCache();

  disk_cache::Entry *entry1, *entry2;
  void* iter = NULL;
  EXPECT_EQ(2, cache()->GetEntryCount());
  ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry1));
  entry1->Close();

  // Hitting the bad rankings data disables the cache; flush to let the
  // restart run.
  EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry2));
  FlushQueueForTest();

  // The restarted cache must accept new entries.
  ASSERT_EQ(net::OK, CreateEntry("Something new", &entry2));
  entry2->Close();

  EXPECT_EQ(1, cache()->GetEntryCount());
}
+ |
// If we disable the cache, already open entries should work as far as possible.
TEST_P(DiskCacheBlockfileBackendTest, DisableSuccess4) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  InitCache();

  disk_cache::Entry *entry1, *entry2, *entry3, *entry4;
  void* iter = NULL;
  ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry1));

  // Random NUL-terminated keys of 2000 and 20000 characters.
  char key2[2000];
  char key3[20000];
  CacheTestFillBuffer(key2, sizeof(key2), true);
  CacheTestFillBuffer(key3, sizeof(key3), true);
  key2[sizeof(key2) - 1] = '\0';
  key3[sizeof(key3) - 1] = '\0';
  ASSERT_EQ(net::OK, CreateEntry(key2, &entry2));
  ASSERT_EQ(net::OK, CreateEntry(key3, &entry3));

  const int kBufSize = 20000;
  scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kBufSize));
  memset(buf->data(), 0, kBufSize);
  EXPECT_EQ(100, WriteData(entry2, 0, 0, buf.get(), 100, false));
  EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false));

  // This line should disable the cache but not delete it.
  EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry4));
  EXPECT_EQ(0, cache()->GetEntryCount());

  EXPECT_NE(net::OK, CreateEntry("cache is disabled", &entry4));

  // Already-open entries keep servicing reads and writes...
  EXPECT_EQ(100, ReadData(entry2, 0, 0, buf.get(), 100));
  EXPECT_EQ(100, WriteData(entry2, 0, 0, buf.get(), 100, false));
  EXPECT_EQ(100, WriteData(entry2, 1, 0, buf.get(), 100, false));

  EXPECT_EQ(kBufSize, ReadData(entry3, 0, 0, buf.get(), kBufSize));
  EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false));
  EXPECT_EQ(kBufSize, WriteData(entry3, 1, 0, buf.get(), kBufSize, false));

  // ...and still report their keys.
  std::string key = entry2->GetKey();
  EXPECT_EQ(sizeof(key2) - 1, key.size());
  key = entry3->GetKey();
  EXPECT_EQ(sizeof(key3) - 1, key.size());

  entry1->Close();
  entry2->Close();
  entry3->Close();
  FlushQueueForTest();  // Flushing the Close posts a task to restart the cache.
  FlushQueueForTest();  // This one actually allows that task to complete.

  EXPECT_EQ(0, cache()->GetEntryCount());
}
+ |
// Starts a backend on the current message loop and lets the usage-stats
// timer fire; the test passes by not crashing while waiting.
TEST_P(DiskCacheBlockfileBackendTest, UsageStatsTimer) {
  MessageLoopHelper helper;

  ASSERT_TRUE(CleanupCacheDir());
  scoped_ptr<disk_cache::BackendImpl> cache;
  cache.reset(new disk_cache::BackendImpl(
      cache_path(), base::MessageLoopProxy::current().get(), NULL));
  ASSERT_TRUE(NULL != cache.get());
  cache->SetUnitTestMode();
  ASSERT_EQ(net::OK, cache->SyncInit());

  // Wait for a callback that never comes... about 2 secs :). The message loop
  // has to run to allow invocation of the usage timer.
  helper.WaitUntilCacheIoFinished(1);
}
+ |
// The usage-stats timer must not be created when initialization fails (here,
// because the on-disk version is wrong).
TEST_P(DiskCacheBlockfileBackendTest, TimerNotCreated) {
  ASSERT_TRUE(CopyTestCache("wrong_version"));

  scoped_ptr<disk_cache::BackendImpl> cache;
  cache.reset(new disk_cache::BackendImpl(
      cache_path(), base::MessageLoopProxy::current().get(), NULL));
  ASSERT_TRUE(NULL != cache.get());
  cache->SetUnitTestMode();
  ASSERT_NE(net::OK, cache->SyncInit());

  ASSERT_TRUE(NULL == cache->GetTimerForTest());

  DisableIntegrityCheck();
}
+ |
// Usage statistics are recorded and survive reopening the cache.
TEST_P(DiskCacheBlockfileBackendTest, UsageStats) {
  InitCache();
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry("key", &entry));
  entry->Close();
  FlushQueueForTest();

  disk_cache::StatsItems stats;
  cache()->GetStats(&stats);
  EXPECT_FALSE(stats.empty());

  // One create hit should have been counted for the entry above.
  disk_cache::StatsItems::value_type hits("Create hit", "0x1");
  EXPECT_EQ(1, std::count(stats.begin(), stats.end(), hits));

  // cache().reset();

  // Now open the cache and verify that the stats are still there.
  // NOTE(review): the explicit reset above is commented out; this assumes the
  // second InitCache() tears down and reopens the backend -- TODO confirm.
  //DisableFirstCleanup();

  InitCache();
  EXPECT_EQ(1, cache()->GetEntryCount());

  stats.clear();
  cache()->GetStats(&stats);
  EXPECT_FALSE(stats.empty());

  EXPECT_EQ(1, std::count(stats.begin(), stats.end(), hits));
}
+ |
// If the index size changes when we doom the cache, we should not crash.
TEST_P(DiskCacheBlockfileBackendTest, DoomAll2) {
  ASSERT_TRUE(CopyTestCache("bad_rankings2"));
  // The large max size makes the recreated index differ from the snapshot's.
  SetMaxSize(20 * 1024 * 1024);
  InitCache();

  EXPECT_EQ(2, cache()->GetEntryCount());
  EXPECT_EQ(net::OK, DoomAllEntries());

  // The cache remains usable after the doom.
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry("Something new", &entry));
  entry->Close();

  EXPECT_EQ(1, cache()->GetEntryCount());
}
+ |
// We should be able to create the same entry on multiple simultaneous
// instances of the cache (each instance backed by its own directory).
TEST_P(DiskCacheBlockfileBackendTest, MultipleInstances) {
  base::ScopedTempDir store1, store2;
  ASSERT_TRUE(store1.CreateUniqueTempDir());
  ASSERT_TRUE(store2.CreateUniqueTempDir());

  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
  net::TestCompletionCallback cb;

  const int kNumberOfCaches = 2;
  scoped_ptr<disk_cache::Backend> cache[kNumberOfCaches];

  // Two backends of different cache types, each in its own temp dir.
  int rv =
      disk_cache::CreateCacheBackend(net::DISK_CACHE,
                                     net::CACHE_BACKEND_DEFAULT,
                                     store1.path(),
                                     0,
                                     false,
                                     cache_thread.message_loop_proxy().get(),
                                     NULL,
                                     &cache[0],
                                     cb.callback());
  ASSERT_EQ(net::OK, cb.GetResult(rv));
  rv = disk_cache::CreateCacheBackend(net::MEDIA_CACHE,
                                      net::CACHE_BACKEND_DEFAULT,
                                      store2.path(),
                                      0,
                                      false,
                                      cache_thread.message_loop_proxy().get(),
                                      NULL,
                                      &cache[1],
                                      cb.callback());
  ASSERT_EQ(net::OK, cb.GetResult(rv));

  ASSERT_TRUE(cache[0].get() != NULL && cache[1].get() != NULL);

  // The same key must be creatable independently on each instance.
  std::string key("the first key");
  disk_cache::Entry* entry;
  for (int i = 0; i < kNumberOfCaches; i++) {
    rv = cache[i]->CreateEntry(key, &entry, cb.callback());
    ASSERT_EQ(net::OK, cb.GetResult(rv));
    entry->Close();
  }
}
+ |
// Test the six regions of the curve that determines the max cache size.
// PreferredCacheSize maps available disk space to a cache size; the regions
// below are delimited by multiples of kDefaultCacheSize.
TEST_P(DiskCacheBlockfileBackendTest, AutomaticMaxSize) {
  using disk_cache::kDefaultCacheSize;
  int64 large_size = kDefaultCacheSize;

  // Region 1: expected = available * 0.8
  EXPECT_EQ((kDefaultCacheSize - 1) * 8 / 10,
            disk_cache::PreferredCacheSize(large_size - 1));
  EXPECT_EQ(kDefaultCacheSize * 8 / 10,
            disk_cache::PreferredCacheSize(large_size));
  EXPECT_EQ(kDefaultCacheSize - 1,
            disk_cache::PreferredCacheSize(large_size * 10 / 8 - 1));

  // Region 2: expected = default_size
  EXPECT_EQ(kDefaultCacheSize,
            disk_cache::PreferredCacheSize(large_size * 10 / 8));
  EXPECT_EQ(kDefaultCacheSize,
            disk_cache::PreferredCacheSize(large_size * 10 - 1));

  // Region 3: expected = available * 0.1
  EXPECT_EQ(kDefaultCacheSize,
            disk_cache::PreferredCacheSize(large_size * 10));
  EXPECT_EQ((kDefaultCacheSize * 25 - 1) / 10,
            disk_cache::PreferredCacheSize(large_size * 25 - 1));

  // Region 4: expected = default_size * 2.5
  EXPECT_EQ(kDefaultCacheSize * 25 / 10,
            disk_cache::PreferredCacheSize(large_size * 25));
  EXPECT_EQ(kDefaultCacheSize * 25 / 10,
            disk_cache::PreferredCacheSize(large_size * 100 - 1));
  EXPECT_EQ(kDefaultCacheSize * 25 / 10,
            disk_cache::PreferredCacheSize(large_size * 100));
  EXPECT_EQ(kDefaultCacheSize * 25 / 10,
            disk_cache::PreferredCacheSize(large_size * 250 - 1));

  // Region 5: expected = available * 0.1
  int64 largest_size = kDefaultCacheSize * 4;
  EXPECT_EQ(kDefaultCacheSize * 25 / 10,
            disk_cache::PreferredCacheSize(large_size * 250));
  EXPECT_EQ(largest_size - 1,
            disk_cache::PreferredCacheSize(largest_size * 100 - 1));

  // Region 6: expected = largest possible size
  EXPECT_EQ(largest_size,
            disk_cache::PreferredCacheSize(largest_size * 100));
  EXPECT_EQ(largest_size,
            disk_cache::PreferredCacheSize(largest_size * 10000));
}
+ |
// Tests that we can "migrate" a running instance from one experiment group to
// another.
TEST_P(DiskCacheBlockfileBackendTest, Histograms) {
  InitCache();
  // The CACHE_UMA macro expands to code that reads a variable named
  // |backend_| (see CACHE_HISTOGRAM_MACROS_BACKEND_IMPL_OBJ at the top of
  // this file), hence the underscore-suffixed local.
  disk_cache::BackendImpl* backend_ = cache_impl(); // Needed by the macro.

  for (int i = 1; i < 3; i++) {
    CACHE_UMA(HOURS, "FillupTime", i, 28);
  }
}
+ |
// Make sure that we keep the total memory used by the internal buffers under
// control.
TEST_P(DiskCacheBlockfileBackendTest, TotalBuffersSize1) {
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 200;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer->data(), kSize, true);

  for (int i = 0; i < 10; i++) {
    SCOPED_TRACE(i);
    // Allocate 2MB for this entry (writes at offset 1MB grow each stream's
    // internal buffer to roughly 1MB).
    EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, true));
    EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer.get(), kSize, true));
    EXPECT_EQ(kSize,
              WriteData(entry, 0, 1024 * 1024, buffer.get(), kSize, false));
    EXPECT_EQ(kSize,
              WriteData(entry, 1, 1024 * 1024, buffer.get(), kSize, false));

    // Delete one of the buffers and truncate the other.
    EXPECT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, true));
    EXPECT_EQ(0, WriteData(entry, 1, 10, buffer.get(), 0, true));

    // Delete the second buffer, writing 10 bytes to disk.
    entry->Close();
    ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  }

  entry->Close();
  // After the churn above, every internal buffer must have been released.
  EXPECT_EQ(0, cache_impl()->GetTotalBuffersSize());
}
+ |
// This test assumes at least 150MB of system memory.
// Exercises the IsAllocAllowed/BufferDeleted accounting directly.
TEST_P(DiskCacheBlockfileBackendTest, TotalBuffersSize2) {
  InitCache();

  const int kOneMB = 1024 * 1024;
  EXPECT_TRUE(cache_impl()->IsAllocAllowed(0, kOneMB));
  EXPECT_EQ(kOneMB, cache_impl()->GetTotalBuffersSize());

  EXPECT_TRUE(cache_impl()->IsAllocAllowed(0, kOneMB));
  EXPECT_EQ(kOneMB * 2, cache_impl()->GetTotalBuffersSize());

  EXPECT_TRUE(cache_impl()->IsAllocAllowed(0, kOneMB));
  EXPECT_EQ(kOneMB * 3, cache_impl()->GetTotalBuffersSize());

  // Releasing a buffer decrements the running total.
  cache_impl()->BufferDeleted(kOneMB);
  EXPECT_EQ(kOneMB * 2, cache_impl()->GetTotalBuffersSize());

  // Check the upper limit.
  EXPECT_FALSE(cache_impl()->IsAllocAllowed(0, 30 * kOneMB));

  // Fill the budget with 1MB grants until it runs out...
  for (int i = 0; i < 30; i++)
    cache_impl()->IsAllocAllowed(0, kOneMB);  // Ignore the result.

  // ...after which further allocations are denied.
  EXPECT_FALSE(cache_impl()->IsAllocAllowed(0, kOneMB));
}
+ |
// Tests that sharing of external files works and we are able to delete the
// files when we need to.
TEST_P(DiskCacheBlockfileBackendTest, FileSharing) {
  InitCache();

  // 0x80000001 is an address in the external-file space (high bit set).
  disk_cache::Addr address(0x80000001);
  ASSERT_TRUE(cache_impl()->CreateExternalFile(&address));
  base::FilePath name = cache_impl()->GetFileName(address);

  scoped_refptr<disk_cache::File> file(new disk_cache::File(false));
  file->Init(name);

#if defined(OS_WIN)
  // On Windows the cache holds the file with FILE_SHARE_DELETE, so a second
  // open succeeds only when it also allows delete sharing.
  DWORD sharing = FILE_SHARE_READ | FILE_SHARE_WRITE;
  DWORD access = GENERIC_READ | GENERIC_WRITE;
  base::win::ScopedHandle file2(CreateFile(
      name.value().c_str(), access, sharing, NULL, OPEN_EXISTING, 0, NULL));
  EXPECT_FALSE(file2.IsValid());

  sharing |= FILE_SHARE_DELETE;
  file2.Set(CreateFile(name.value().c_str(), access, sharing, NULL,
                       OPEN_EXISTING, 0, NULL));
  EXPECT_TRUE(file2.IsValid());
#endif

  EXPECT_TRUE(base::DeleteFile(name, false));

  // We should be able to use the file (the handle stays valid even though the
  // name was deleted).
  const int kSize = 200;
  char buffer1[kSize];
  char buffer2[kSize];
  memset(buffer1, 't', kSize);
  memset(buffer2, 0, kSize);
  EXPECT_TRUE(file->Write(buffer1, kSize, 0));
  EXPECT_TRUE(file->Read(buffer2, kSize, 0));
  EXPECT_EQ(0, memcmp(buffer1, buffer2, kSize));

  EXPECT_TRUE(disk_cache::DeleteCacheFile(name));
}
+ |
// An external cache hit must refresh an entry's rank so eviction picks the
// other (now least-recently-used) entry instead.
TEST_P(DiskCacheBlockfileBackendTest, UpdateRankForExternalCacheHit) {
  InitCache();

  disk_cache::Entry* entry;

  // Create key0 then key1, so key0 starts as the oldest.
  for (int i = 0; i < 2; ++i) {
    std::string key = base::StringPrintf("key%d", i);
    ASSERT_EQ(net::OK, CreateEntry(key, &entry));
    entry->Close();
  }

  // Ping the oldest entry.
  cache()->OnExternalCacheHit("key0");

  TrimForTest(false);

  // Make sure the older key remains.
  EXPECT_EQ(1, cache()->GetEntryCount());
  ASSERT_EQ(net::OK, OpenEntry("key0", &entry));
  entry->Close();
}
+ |
+} // namespace |
+ |
+} // namespace disk_cache |