| Index: net/disk_cache/backend_unittest.cc
|
| ===================================================================
|
| --- net/disk_cache/backend_unittest.cc (revision 199883)
|
| +++ net/disk_cache/backend_unittest.cc (working copy)
|
| @@ -28,15 +28,32 @@
|
| #include "net/disk_cache/tracing_cache_backend.h"
|
| #include "testing/gtest/include/gtest/gtest.h"
|
|
|
| -#if defined(OS_WIN)
|
| -#include "base/win/scoped_handle.h"
|
| -#endif
|
| -
|
| using base::Time;
|
|
|
| +namespace {
|
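| +// Delay passed to AddDelayForTest() so that consecutive cache operations get
|
| +// distinct timestamps.
|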
| +const int kDelayToNextTimestamp = 60;
|
| +}  // namespace
|
| +
|
| // Tests that can run with different types of caches.
|
| class DiskCacheBackendTest : public DiskCacheTestWithCache {
|
| protected:
|
| + // Some utility methods:
|
| +
|
| +  // Performs IO operations on the cache until there is pending IO.
|
| + int GeneratePendingIO(net::TestCompletionCallback* cb);
|
| +
|
| +  // Adds 5 sparse entries. |doomed_start| and |doomed_end|, if not NULL, will
|
| +  // be filled with times used by DoomEntriesSince and DoomEntriesBetween.
|
| +  // There are 4 entries after |doomed_start| and 2 after |doomed_end|.
|
| + void InitSparseCache(base::Time* doomed_start, base::Time* doomed_end);
|
| +
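| +  // Helpers for tests that enumerate the cache: CreateSetOfRandomEntries()
|
| +  // fills the cache with randomly keyed entries and EnumerateAndMatchKeys()
|
| +  // checks that an enumeration returns exactly those keys.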
|
| + bool CreateSetOfRandomEntries(std::set<std::string>* key_pool);
|
| + bool EnumerateAndMatchKeys(int max_to_open,
|
| + void** iter,
|
| + std::set<std::string>* keys_to_match,
|
| + size_t* count);
|
| +
|
| + // Actual tests:
|
| void BackendBasics();
|
| void BackendKeying();
|
| void BackendShutdownWithPendingFileIO(bool fast);
|
| @@ -45,6 +62,8 @@
|
| void BackendSetSize();
|
| void BackendLoad();
|
| void BackendChain();
|
| + void BucketUse();
|
| + void BackendNewEvictionTrim();
|
| void BackendValidEntry();
|
| void BackendInvalidEntry();
|
| void BackendInvalidEntryRead();
|
| @@ -56,12 +75,6 @@
|
| void BackendInvalidEntryEnumeration();
|
| void BackendFixEnumerators();
|
| void BackendDoomRecent();
|
| -
|
| - // Adds 5 sparse entries. |doomed_start| and |doomed_end| if not NULL,
|
| - // will be filled with times, used by DoomEntriesSince and DoomEntriesBetween.
|
| - // There are 4 entries after doomed_start and 2 after doomed_end.
|
| - void InitSparseCache(base::Time* doomed_start, base::Time* doomed_end);
|
| -
|
| void BackendDoomBetween();
|
| void BackendTransaction(const std::string& name, int num_entries, bool load);
|
| void BackendRecoverInsert();
|
| @@ -83,9 +96,158 @@
|
| void BackendDisable2();
|
| void BackendDisable3();
|
| void BackendDisable4();
|
| + void BackendTotalBuffersSize1();
|
| + void BackendTotalBuffersSize2();
|
| + void BackendUpdateRankForExternalCacheHit();
|
| void TracingBackendBasics();
|
| };
|
|
|
| +int DiskCacheBackendTest::GeneratePendingIO(net::TestCompletionCallback* cb) {
|
| + if (!use_current_thread_) {
|
| + ADD_FAILURE();
|
| + return net::ERR_FAILED;
|
| + }
|
| +
|
| + disk_cache::Entry* entry;
|
| + int rv = cache_->CreateEntry("some key", &entry, cb->callback());
|
| + if (cb->GetResult(rv) != net::OK)
|
| + return net::ERR_CACHE_CREATE_FAILURE;
|
| +
|
| + const int kSize = 25000;
|
| + scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
|
| + CacheTestFillBuffer(buffer->data(), kSize, false);
|
| +
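| +  // Keep issuing kSize-byte writes at 64 KB offsets (up to 10 MB) until one
|
| +  // of them returns ERR_IO_PENDING.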
|
| + for (int i = 0; i < 10 * 1024 * 1024; i += 64 * 1024) {
|
| +    // We are using the current thread as the cache thread because we want to
|
| +    // be able to call this method directly, to make sure that it is the OS
|
| +    // (and not our own thread switching) that returns IO pending.
|
| + if (!simple_cache_mode_) {
|
| + rv = static_cast<disk_cache::EntryImpl*>(entry)->WriteDataImpl(
|
| + 0, i, buffer.get(), kSize, cb->callback(), false);
|
| + } else {
|
| + rv = entry->WriteData(0, i, buffer.get(), kSize, cb->callback(), false);
|
| + }
|
| +
|
| + if (rv == net::ERR_IO_PENDING)
|
| + break;
|
| + if (rv != kSize)
|
| + rv = net::ERR_FAILED;
|
| + }
|
| +
|
| + // Don't call Close() to avoid going through the queue or we'll deadlock
|
| + // waiting for the operation to finish.
|
| + if (!simple_cache_mode_)
|
| + static_cast<disk_cache::EntryImpl*>(entry)->Release();
|
| + else
|
| + entry->Close();
|
| +
|
| + return rv;
|
| +}
|
| +
|
| +void DiskCacheBackendTest::InitSparseCache(base::Time* doomed_start,
|
| + base::Time* doomed_end) {
|
| + InitCache();
|
| +
|
| + const int kSize = 50;
|
| +  // This must be greater than MemEntryImpl::kMaxSparseEntrySize.
|
| + const int kOffset = 10 + 1024 * 1024;
|
| +
|
| + disk_cache::Entry* entry0 = NULL;
|
| + disk_cache::Entry* entry1 = NULL;
|
| + disk_cache::Entry* entry2 = NULL;
|
| +
|
| + scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
|
| + CacheTestFillBuffer(buffer->data(), kSize, false);
|
| +
|
| + ASSERT_EQ(net::OK, CreateEntry("zeroth", &entry0));
|
| + ASSERT_EQ(kSize, WriteSparseData(entry0, 0, buffer.get(), kSize));
|
| + ASSERT_EQ(kSize,
|
| + WriteSparseData(entry0, kOffset + kSize, buffer.get(), kSize));
|
| + entry0->Close();
|
| +
|
| + FlushQueueForTest();
|
| + AddDelay();
|
| + if (doomed_start)
|
| + *doomed_start = base::Time::Now();
|
| +
|
| + // Order in rankings list:
|
| + // first_part1, first_part2, second_part1, second_part2
|
| + ASSERT_EQ(net::OK, CreateEntry("first", &entry1));
|
| + ASSERT_EQ(kSize, WriteSparseData(entry1, 0, buffer.get(), kSize));
|
| + ASSERT_EQ(kSize,
|
| + WriteSparseData(entry1, kOffset + kSize, buffer.get(), kSize));
|
| + entry1->Close();
|
| +
|
| + ASSERT_EQ(net::OK, CreateEntry("second", &entry2));
|
| + ASSERT_EQ(kSize, WriteSparseData(entry2, 0, buffer.get(), kSize));
|
| + ASSERT_EQ(kSize,
|
| + WriteSparseData(entry2, kOffset + kSize, buffer.get(), kSize));
|
| + entry2->Close();
|
| +
|
| + FlushQueueForTest();
|
| + AddDelay();
|
| + if (doomed_end)
|
| + *doomed_end = base::Time::Now();
|
| +
|
| + // Order in rankings list:
|
| + // third_part1, fourth_part1, third_part2, fourth_part2
|
| + disk_cache::Entry* entry3 = NULL;
|
| + disk_cache::Entry* entry4 = NULL;
|
| + ASSERT_EQ(net::OK, CreateEntry("third", &entry3));
|
| + ASSERT_EQ(kSize, WriteSparseData(entry3, 0, buffer.get(), kSize));
|
| + ASSERT_EQ(net::OK, CreateEntry("fourth", &entry4));
|
| + ASSERT_EQ(kSize, WriteSparseData(entry4, 0, buffer.get(), kSize));
|
| + ASSERT_EQ(kSize,
|
| + WriteSparseData(entry3, kOffset + kSize, buffer.get(), kSize));
|
| + ASSERT_EQ(kSize,
|
| + WriteSparseData(entry4, kOffset + kSize, buffer.get(), kSize));
|
| + entry3->Close();
|
| + entry4->Close();
|
| +
|
| + FlushQueueForTest();
|
| + AddDelay();
|
| +}
|
| +
|
| +// Creates entries based on random keys. Stores these keys in |key_pool|.
|
| +bool DiskCacheBackendTest::CreateSetOfRandomEntries(
|
| + std::set<std::string>* key_pool) {
|
| + const int kNumEntries = 10;
|
| +
|
| + for (int i = 0; i < kNumEntries; ++i) {
|
| + std::string key = GenerateKey(true);
|
| + disk_cache::Entry* entry;
|
| + if (CreateEntry(key, &entry) != net::OK)
|
| + return false;
|
| + key_pool->insert(key);
|
| + entry->Close();
|
| + }
|
| + return key_pool->size() == implicit_cast<size_t>(cache_->GetEntryCount());
|
| +}
|
| +
|
| +// Iterates over the backend and checks that the key of each opened entry is
|
| +// in |keys_to_match|, erasing it from the set. If |max_to_open| is
|
| +// non-negative, iteration stops once |count| reaches |max_to_open|; otherwise
|
| +// it continues until OpenNextEntry stops returning net::OK.
|
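| +//
|
| +// A typical use, sketched here as a comment (the callers are not part of
|
| +// this hunk):
|
| +//   std::set<std::string> key_pool;
|
| +//   ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
|
| +//   void* iter = NULL;
|
| +//   size_t count = 0;
|
| +//   EXPECT_TRUE(EnumerateAndMatchKeys(-1, &iter, &key_pool, &count));
|
| +//   EXPECT_TRUE(key_pool.empty());
|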
| +bool DiskCacheBackendTest::EnumerateAndMatchKeys(
|
| + int max_to_open,
|
| + void** iter,
|
| + std::set<std::string>* keys_to_match,
|
| + size_t* count) {
|
| + disk_cache::Entry* entry;
|
| +
|
| + while (OpenNextEntry(iter, &entry) == net::OK) {
|
| + if (!entry)
|
| + return false;
|
| + EXPECT_EQ(1U, keys_to_match->erase(entry->GetKey()));
|
| + entry->Close();
|
| + ++(*count);
|
| + if (max_to_open >= 0 && implicit_cast<int>(*count) >= max_to_open)
|
| + break;
|
| +  }
|
| +
|
| + return true;
|
| +}
|
| +
|
| void DiskCacheBackendTest::BackendBasics() {
|
| InitCache();
|
| disk_cache::Entry *entry1 = NULL, *entry2 = NULL;
|
| @@ -156,6 +318,11 @@
|
| BackendBasics();
|
| }
|
|
|
| +TEST_F(DiskCacheBackendTest, V3Basics) {
|
| + UseVersion3();
|
| + BackendBasics();
|
| +}
|
| +
|
| void DiskCacheBackendTest::BackendKeying() {
|
| InitCache();
|
| const char* kName1 = "the first key";
|
| @@ -201,11 +368,22 @@
|
| BackendKeying();
|
| }
|
|
|
| +TEST_F(DiskCacheBackendTest, V3Keying) {
|
| + UseVersion3();
|
| + BackendKeying();
|
| +}
|
| +
|
| TEST_F(DiskCacheBackendTest, NewEvictionKeying) {
|
| SetNewEviction();
|
| BackendKeying();
|
| }
|
|
|
| +TEST_F(DiskCacheBackendTest, V3NewEvictionKeying) {
|
| + UseVersion3();
|
| + SetNewEviction();
|
| + BackendKeying();
|
| +}
|
| +
|
| TEST_F(DiskCacheBackendTest, MemoryOnlyKeying) {
|
| SetMemoryOnlyMode();
|
| BackendKeying();
|
| @@ -295,7 +473,7 @@
|
| // Now let's create a file with the cache.
|
| disk_cache::Entry* entry;
|
| ASSERT_EQ(net::OK, CreateEntry("key", &entry));
|
| - ASSERT_EQ(0, WriteData(entry, 0, 20000, buffer1, 0, false));
|
| + ASSERT_EQ(0, WriteData(entry, 0, 20000, buffer1.get(), 0, false));
|
| entry->Close();
|
|
|
| // And verify that the first file is still there.
|
| @@ -306,68 +484,34 @@
|
|
|
| // Tests that we deal with file-level pending operations at destruction time.
|
| void DiskCacheBackendTest::BackendShutdownWithPendingFileIO(bool fast) {
|
| - net::TestCompletionCallback cb;
|
| - int rv;
|
| + ASSERT_TRUE(CleanupCacheDir());
|
| + if (fast)
|
| + AvoidTestFlag();
|
|
|
| - {
|
| - ASSERT_TRUE(CleanupCacheDir());
|
| - base::Thread cache_thread("CacheThread");
|
| - ASSERT_TRUE(cache_thread.StartWithOptions(
|
| - base::Thread::Options(MessageLoop::TYPE_IO, 0)));
|
| + UseCurrentThread();
|
| + CreateBackend(NULL);
|
| + SetNoBuffering();
|
|
|
| - uint32 flags = disk_cache::kNoBuffering;
|
| - if (!fast)
|
| - flags |= disk_cache::kNoRandom;
|
| + net::TestCompletionCallback cb;
|
| + int rv = GeneratePendingIO(&cb);
|
|
|
| - UseCurrentThread();
|
| - CreateBackend(flags, NULL);
|
| + // The cache destructor will see one pending operation here.
|
| + delete cache_;
|
| +  // Prevent TearDown() from deleting the backend again.
|
| + cache_ = NULL;
|
|
|
| - disk_cache::EntryImpl* entry;
|
| - rv = cache_->CreateEntry(
|
| - "some key", reinterpret_cast<disk_cache::Entry**>(&entry),
|
| - cb.callback());
|
| - ASSERT_EQ(net::OK, cb.GetResult(rv));
|
| -
|
| - const int kSize = 25000;
|
| - scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
|
| - CacheTestFillBuffer(buffer->data(), kSize, false);
|
| -
|
| - for (int i = 0; i < 10 * 1024 * 1024; i += 64 * 1024) {
|
| - // We are using the current thread as the cache thread because we want to
|
| - // be able to call directly this method to make sure that the OS (instead
|
| - // of us switching thread) is returning IO pending.
|
| - rv = entry->WriteDataImpl(0, i, buffer, kSize, cb.callback(), false);
|
| - if (rv == net::ERR_IO_PENDING)
|
| - break;
|
| - EXPECT_EQ(kSize, rv);
|
| - }
|
| -
|
| - // Don't call Close() to avoid going through the queue or we'll deadlock
|
| - // waiting for the operation to finish.
|
| - entry->Release();
|
| -
|
| - // The cache destructor will see one pending operation here.
|
| - delete cache_;
|
| - // Prevent the TearDown() to delete the backend again.
|
| - cache_ = NULL;
|
| -
|
| - if (rv == net::ERR_IO_PENDING) {
|
| - if (fast)
|
| - EXPECT_FALSE(cb.have_result());
|
| - else
|
| - EXPECT_TRUE(cb.have_result());
|
| - }
|
| + if (rv == net::ERR_IO_PENDING) {
|
| + if (fast)
|
| + EXPECT_FALSE(cb.have_result());
|
| + else
|
| + EXPECT_TRUE(cb.have_result());
|
| }
|
|
|
| MessageLoop::current()->RunUntilIdle();
|
|
|
| -#if defined(OS_WIN)
|
| // Wait for the actual operation to complete, or we'll keep a file handle that
|
| - // may cause issues later. Note that on Posix systems even though this test
|
| - // uses a single thread, the actual IO is posted to a worker thread and the
|
| - // cache destructor breaks the link to reach cb when the operation completes.
|
| + // may cause issues later.
|
| rv = cb.GetResult(rv);
|
| -#endif
|
| }
|
|
|
| TEST_F(DiskCacheBackendTest, ShutdownWithPendingFileIO) {
|
| @@ -392,11 +536,11 @@
|
| ASSERT_TRUE(cache_thread.StartWithOptions(
|
| base::Thread::Options(MessageLoop::TYPE_IO, 0)));
|
|
|
| - uint32 flags = disk_cache::kNoBuffering;
|
| - if (!fast)
|
| - flags |= disk_cache::kNoRandom;
|
| + if (fast)
|
| + AvoidTestFlag();
|
|
|
| - CreateBackend(flags, &cache_thread);
|
| + CreateBackend(&cache_thread);
|
| + SetNoBuffering();
|
|
|
| disk_cache::Entry* entry;
|
| int rv = cache_->CreateEntry("some key", &entry, cb.callback());
|
| @@ -435,9 +579,9 @@
|
| ASSERT_TRUE(cache_thread.StartWithOptions(
|
| base::Thread::Options(MessageLoop::TYPE_IO, 0)));
|
|
|
| - disk_cache::BackendFlags flags =
|
| - fast ? disk_cache::kNone : disk_cache::kNoRandom;
|
| - CreateBackend(flags, &cache_thread);
|
| + if (fast)
|
| + AvoidTestFlag();
|
| + CreateBackend(&cache_thread);
|
|
|
| disk_cache::Entry* entry;
|
| int rv = cache_->CreateEntry("some key", &entry, cb.callback());
|
| @@ -496,20 +640,22 @@
|
|
|
| scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(cache_size));
|
| memset(buffer->data(), 0, cache_size);
|
| - EXPECT_EQ(cache_size / 10, WriteData(entry, 0, 0, buffer, cache_size / 10,
|
| - false)) << "normal file";
|
| + EXPECT_EQ(cache_size / 10,
|
| + WriteData(entry, 0, 0, buffer.get(), cache_size / 10, false)) <<
|
| + "normal file";
|
|
|
| - EXPECT_EQ(net::ERR_FAILED, WriteData(entry, 1, 0, buffer, cache_size / 5,
|
| - false)) << "file size above the limit";
|
| + EXPECT_EQ(net::ERR_FAILED,
|
| + WriteData(entry, 1, 0, buffer.get(), cache_size / 5, false)) <<
|
| + "file size above the limit";
|
|
|
| // By doubling the total size, we make this file cacheable.
|
| SetMaxSize(cache_size * 2);
|
| - EXPECT_EQ(cache_size / 5, WriteData(entry, 1, 0, buffer, cache_size / 5,
|
| + EXPECT_EQ(cache_size / 5, WriteData(entry, 1, 0, buffer.get(), cache_size / 5,
|
| false));
|
|
|
| // Let's fill up the cache!.
|
| SetMaxSize(cache_size * 10);
|
| - EXPECT_EQ(cache_size * 3 / 4, WriteData(entry, 0, 0, buffer,
|
| + EXPECT_EQ(cache_size * 3 / 4, WriteData(entry, 0, 0, buffer.get(),
|
| cache_size * 3 / 4, false));
|
| entry->Close();
|
| FlushQueueForTest();
|
| @@ -519,21 +665,24 @@
|
| // The cache is 95% full.
|
|
|
| ASSERT_EQ(net::OK, CreateEntry(second, &entry));
|
| - EXPECT_EQ(cache_size / 10, WriteData(entry, 0, 0, buffer, cache_size / 10,
|
| - false));
|
| + EXPECT_EQ(cache_size / 10,
|
| + WriteData(entry, 0, 0, buffer.get(), cache_size / 10, false));
|
|
|
| disk_cache::Entry* entry2;
|
| - ASSERT_EQ(net::OK, CreateEntry("an extra key", &entry2));
|
| - EXPECT_EQ(cache_size / 10, WriteData(entry2, 0, 0, buffer, cache_size / 10,
|
| - false));
|
| + std::string extra_key("an extra key");
|
| + ASSERT_EQ(net::OK, CreateEntry(extra_key, &entry2));
|
| + EXPECT_EQ(cache_size / 10, WriteData(entry2, 0, 0, buffer.get(),
|
| + cache_size / 10, false));
|
| entry2->Close(); // This will trigger the cache trim.
|
| + WaitForEntryToClose(extra_key);
|
|
|
| - EXPECT_NE(net::OK, OpenEntry(first, &entry2));
|
| -
|
| FlushQueueForTest(); // Make sure that we are done trimming the cache.
|
| FlushQueueForTest(); // We may have posted two tasks to evict stuff.
|
|
|
| entry->Close();
|
| + WaitForEntryToClose(second);
|
| +
|
| + EXPECT_NE(net::OK, OpenEntry(first, &entry2));
|
| ASSERT_EQ(net::OK, OpenEntry(second, &entry));
|
| EXPECT_EQ(cache_size / 10, entry->GetDataSize(0));
|
| entry->Close();
|
| @@ -543,11 +692,24 @@
|
| BackendSetSize();
|
| }
|
|
|
| +TEST_F(DiskCacheBackendTest, V3SetSize) {
|
| + UseVersion3();
|
| + BackendSetSize();
|
| +}
|
| +
|
| TEST_F(DiskCacheBackendTest, NewEvictionSetSize) {
|
| SetNewEviction();
|
| BackendSetSize();
|
| }
|
|
|
| +TEST_F(DiskCacheBackendTest, V3NewEvictionSetSize) {
|
| + UseVersion3();
|
| + SetNewEviction();
|
| + BackendSetSize();
|
| +}
|
| +
|
| TEST_F(DiskCacheBackendTest, MemoryOnlySetSize) {
|
| SetMemoryOnlyMode();
|
| BackendSetSize();
|
| @@ -654,7 +816,101 @@
|
| BackendChain();
|
| }
|
|
|
| -TEST_F(DiskCacheBackendTest, NewEvictionTrim) {
|
| +// Tests the proper use of cell buckets while the index grows.
|
| +void DiskCacheBackendTest::BucketUse() {
|
| + UseVersion3();
|
| + InitCache();
|
| + const int kNumEntries = 20;
|
| + disk_cache::Entry* entries[kNumEntries];
|
| + std::string key("The first key");
|
| +
|
| + // This generates kNumEntries collisions so the extra table has to be used.
|
| +  // There are only 2 extra buckets in the test setup, so the index has to grow.
|
| + for (int i = 0; i < kNumEntries; i++) {
|
| + ASSERT_EQ(net::OK, CreateEntry(key, &entries[i]));
|
| + entries[i]->Doom();
|
| + FlushQueueForTest();
|
| + }
|
| +
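| +  // Close half of the doomed entries and create new colliding ones, which
|
| +  // should reuse the cells they released.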
|
| + for (int i = 0; i < kNumEntries / 2; i++)
|
| + entries[i]->Close();
|
| +
|
| + for (int i = 0; i < kNumEntries / 2; i++) {
|
| + ASSERT_EQ(net::OK, CreateEntry(key, &entries[i]));
|
| + entries[i]->Doom();
|
| + }
|
| +
|
| + for (int i = 0; i < kNumEntries; i++)
|
| + entries[i]->Close();
|
| +}
|
| +
|
| +TEST_F(DiskCacheBackendTest, V3BucketUse) {
|
| + BucketUse();
|
| +}
|
| +
|
| +TEST_F(DiskCacheBackendTest, V3DoubleIndexBucketUse) {
|
| +  PresetTestMode();  // Doubles the index instead of growing it slowly.
|
| + BucketUse();
|
| +}
|
| +
|
| +TEST_F(DiskCacheBackendTest, V3DoubleIndex) {
|
| + UseVersion3();
|
| + PresetTestMode();
|
| + InitCache();
|
| +
|
| + disk_cache::Entry* entry;
|
| + const int kNumEntries = 200;
|
| + for (int i = 0; i < kNumEntries; i++) {
|
| + std::string name(base::StringPrintf("Key %d", i));
|
| + ASSERT_EQ(net::OK, CreateEntry(name, &entry));
|
| + entry->Close();
|
| + FlushQueueForTest();
|
| + }
|
| +
|
| + // Generate enough collisions to force growing the extra table (and with it,
|
| + // the index).
|
| + const int kNumSavedEntries = 5;
|
| + disk_cache::Entry* entries[kNumSavedEntries];
|
| + std::string key("The first key");
|
| + for (int i = 0; i < kNumSavedEntries; i++) {
|
| + ASSERT_EQ(net::OK, CreateEntry(key, &entries[i]));
|
| + entries[i]->Doom();
|
| + FlushQueueForTest();
|
| + }
|
| +
|
| + for (int i = 0; i < kNumSavedEntries; i++)
|
| + entries[i]->Close();
|
| +
|
| + // Verify that all entries are there.
|
| + for (int i = 0; i < kNumEntries; i++) {
|
| + std::string name(base::StringPrintf("Key %d", i));
|
| + ASSERT_EQ(net::OK, OpenEntry(name, &entry));
|
| + entry->Close();
|
| + }
|
| +}
|
| +
|
| +// This test leaks memory because it simulates a crash.
|
| +TEST_F(DiskCacheBackendTest, DISABLED_V3Backup) {
|
| + UseVersion3();
|
| + PresetTestMode();
|
| + InitCache();
|
| +
|
| + disk_cache::Entry* entry;
|
| + std::string name1("First entry");
|
| + ASSERT_EQ(net::OK, CreateEntry(name1, &entry));
|
| + entry->Close();
|
| + WaitForEntryToClose(name1);
|
| +
|
| + std::string name2("Another entry");
|
| + ASSERT_EQ(net::OK, CreateEntry(name2, &entry));
|
| +
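| +  // After the crash, the first entry (already closed) should survive, while
|
| +  // the second one (still open when the crash hits) should be gone.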
|
| + SimulateCrash();
|
| + EXPECT_EQ(net::OK, OpenEntry(name1, &entry));
|
| + entry->Close();
|
| + EXPECT_NE(net::OK, OpenEntry(name2, &entry));
|
| +}
|
| +
|
| +void DiskCacheBackendTest::BackendNewEvictionTrim() {
|
| SetNewEviction();
|
| InitCache();
|
|
|
| @@ -663,11 +919,18 @@
|
| std::string name(base::StringPrintf("Key %d", i));
|
| ASSERT_EQ(net::OK, CreateEntry(name, &entry));
|
| entry->Close();
|
| + AddDelayForTest(kDelayToNextTimestamp);
|
| +
|
| if (i < 90) {
|
| // Entries 0 to 89 are in list 1; 90 to 99 are in list 0.
|
| ASSERT_EQ(net::OK, OpenEntry(name, &entry));
|
| entry->Close();
|
| }
|
| + if (!(i % 10))
|
| + FlushQueueForTest();
|
| +
|
| + if (i == 0 || i == 90)
|
| + WaitForEntryToClose(name);
|
| }
|
|
|
| // The first eviction must come from list 1 (10% limit), the second must come
|
| @@ -684,6 +947,15 @@
|
| entry->Close();
|
| }
|
|
|
| +TEST_F(DiskCacheBackendTest, NewEvictionTrim) {
|
| + BackendNewEvictionTrim();
|
| +}
|
| +
|
| +TEST_F(DiskCacheBackendTest, V3NewEvictionTrim) {
|
| + UseVersion3();
|
| + BackendNewEvictionTrim();
|
| +}
|
| +
|
| // Before looking for invalid entries, let's check a valid entry.
|
| void DiskCacheBackendTest::BackendValidEntry() {
|
| InitCache();
|
| @@ -696,7 +968,7 @@
|
| scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
|
| memset(buffer1->data(), 0, kSize);
|
| base::strlcpy(buffer1->data(), "And the data to save", kSize);
|
| - EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer1, kSize, false));
|
| + EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer1.get(), kSize, false));
|
| entry->Close();
|
| SimulateCrash();
|
|
|
| @@ -704,7 +976,7 @@
|
|
|
| scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
|
| memset(buffer2->data(), 0, kSize);
|
| - EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer2, kSize));
|
| + EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer2.get(), kSize));
|
| entry->Close();
|
| EXPECT_STREQ(buffer1->data(), buffer2->data());
|
| }
|
| @@ -732,18 +1004,15 @@
|
| scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
|
| memset(buffer->data(), 0, kSize);
|
| base::strlcpy(buffer->data(), "And the data to save", kSize);
|
| - EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer, kSize, false));
|
| + EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
|
| SimulateCrash();
|
|
|
| EXPECT_NE(net::OK, OpenEntry(key, &entry));
|
| EXPECT_EQ(0, cache_->GetEntryCount());
|
| }
|
|
|
| -// This and the other intentionally leaky tests below are excluded from
|
| -// valgrind runs by naming them in the files
|
| -// net/data/valgrind/net_unittests.gtest.txt
|
| -// The scripts tools/valgrind/chrome_tests.sh
|
| -// read those files and pass the appropriate --gtest_filter to net_unittests.
|
| +#if !defined(LEAK_SANITIZER)
|
| +// We'll be leaking memory from this test.
|
| TEST_F(DiskCacheBackendTest, InvalidEntry) {
|
| BackendInvalidEntry();
|
| }
|
| @@ -779,10 +1048,10 @@
|
| scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
|
| memset(buffer->data(), 0, kSize);
|
| base::strlcpy(buffer->data(), "And the data to save", kSize);
|
| - EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer, kSize, false));
|
| + EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
|
| entry->Close();
|
| ASSERT_EQ(net::OK, OpenEntry(key, &entry));
|
| - EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer, kSize));
|
| + EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer.get(), kSize));
|
|
|
| SimulateCrash();
|
|
|
| @@ -905,13 +1174,13 @@
|
|
|
| scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
|
| memset(buffer->data(), 0, kSize);
|
| - EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer, kSize, false));
|
| + EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
|
|
|
| // Simulate a crash.
|
| SimulateCrash();
|
|
|
| ASSERT_EQ(net::OK, CreateEntry(second, &entry));
|
| - EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer, kSize, false));
|
| + EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
|
|
|
| EXPECT_EQ(2, cache_->GetEntryCount());
|
| SetMaxSize(kSize);
|
| @@ -960,7 +1229,7 @@
|
| for (int i = 0; i < 32; i++) {
|
| std::string key(base::StringPrintf("some key %d", i));
|
| ASSERT_EQ(net::OK, CreateEntry(key, &entry));
|
| - EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer, kSize, false));
|
| + EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
|
| entry->Close();
|
| ASSERT_EQ(net::OK, OpenEntry(key, &entry));
|
| // Note that we are not closing the entries.
|
| @@ -970,7 +1239,7 @@
|
| SimulateCrash();
|
|
|
| ASSERT_EQ(net::OK, CreateEntry("Something else", &entry));
|
| - EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer, kSize, false));
|
| + EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
|
|
|
| FlushQueueForTest();
|
| EXPECT_EQ(33, cache_->GetEntryCount());
|
| @@ -1005,6 +1274,7 @@
|
| SetNewEviction();
|
| BackendTrimInvalidEntry2();
|
| }
|
| +#endif // !defined(LEAK_SANITIZER)
|
|
|
| void DiskCacheBackendTest::BackendEnumerations() {
|
| InitCache();
|
| @@ -1018,6 +1288,8 @@
|
| disk_cache::Entry* entry;
|
| ASSERT_EQ(net::OK, CreateEntry(key, &entry));
|
| entry->Close();
|
| + if (!(i % 10))
|
| + FlushQueueForTest();
|
| }
|
| EXPECT_EQ(kNumEntries, cache_->GetEntryCount());
|
| Time final = Time::Now();
|
| @@ -1060,11 +1332,22 @@
|
| BackendEnumerations();
|
| }
|
|
|
| +TEST_F(DiskCacheBackendTest, V3Enumerations) {
|
| + UseVersion3();
|
| + BackendEnumerations();
|
| +}
|
| +
|
| TEST_F(DiskCacheBackendTest, NewEvictionEnumerations) {
|
| SetNewEviction();
|
| BackendEnumerations();
|
| }
|
|
|
| +TEST_F(DiskCacheBackendTest, V3NewEvictionEnumerations) {
|
| + UseVersion3();
|
| + SetNewEviction();
|
| + BackendEnumerations();
|
| +}
|
| +
|
| TEST_F(DiskCacheBackendTest, MemoryOnlyEnumerations) {
|
| SetMemoryOnlyMode();
|
| BackendEnumerations();
|
| @@ -1093,7 +1376,7 @@
|
| FlushQueueForTest();
|
|
|
| // Make sure that the timestamp is not the same.
|
| - AddDelay();
|
| + AddDelayForTest(kDelayToNextTimestamp);
|
| ASSERT_EQ(net::OK, OpenEntry(second, &entry1));
|
| void* iter = NULL;
|
| ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry2));
|
| @@ -1129,11 +1412,22 @@
|
| BackendEnumerations2();
|
| }
|
|
|
| +TEST_F(DiskCacheBackendTest, V3Enumerations2) {
|
| + UseVersion3();
|
| + BackendEnumerations2();
|
| +}
|
| +
|
| TEST_F(DiskCacheBackendTest, NewEvictionEnumerations2) {
|
| SetNewEviction();
|
| BackendEnumerations2();
|
| }
|
|
|
| +TEST_F(DiskCacheBackendTest, V3NewEvictionEnumerations2) {
|
| + UseVersion3();
|
| + SetNewEviction();
|
| + BackendEnumerations2();
|
| +}
|
| +
|
| TEST_F(DiskCacheBackendTest, MemoryOnlyEnumerations2) {
|
| SetMemoryOnlyMode();
|
| BackendEnumerations2();
|
| @@ -1163,7 +1457,7 @@
|
| ASSERT_EQ(net::OK, CreateEntry(first, &entry1));
|
| memset(buffer1->data(), 0, kSize);
|
| base::strlcpy(buffer1->data(), "And the data to save", kSize);
|
| - EXPECT_EQ(kSize, WriteData(entry1, 0, 0, buffer1, kSize, false));
|
| + EXPECT_EQ(kSize, WriteData(entry1, 0, 0, buffer1.get(), kSize, false));
|
|
|
| ASSERT_EQ(net::OK, CreateEntry(second, &entry2));
|
| entry2->Close();
|
| @@ -1174,7 +1468,7 @@
|
| AddDelay();
|
|
|
| // Read from the last item in the LRU.
|
| - EXPECT_EQ(kSize, ReadData(entry1, 0, 0, buffer1, kSize));
|
| + EXPECT_EQ(kSize, ReadData(entry1, 0, 0, buffer1.get(), kSize));
|
| entry1->Close();
|
|
|
| void* iter = NULL;
|
| @@ -1184,6 +1478,7 @@
|
| cache_->EndEnumeration(&iter);
|
| }
|
|
|
| +#if !defined(LEAK_SANITIZER)
|
| // Verify handling of invalid entries while doing enumerations.
|
| // We'll be leaking memory from this test.
|
| void DiskCacheBackendTest::BackendInvalidEntryEnumeration() {
|
| @@ -1197,10 +1492,10 @@
|
| scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
|
| memset(buffer1->data(), 0, kSize);
|
| base::strlcpy(buffer1->data(), "And the data to save", kSize);
|
| - EXPECT_EQ(kSize, WriteData(entry1, 0, 0, buffer1, kSize, false));
|
| + EXPECT_EQ(kSize, WriteData(entry1, 0, 0, buffer1.get(), kSize, false));
|
| entry1->Close();
|
| ASSERT_EQ(net::OK, OpenEntry(key, &entry1));
|
| - EXPECT_EQ(kSize, ReadData(entry1, 0, 0, buffer1, kSize));
|
| + EXPECT_EQ(kSize, ReadData(entry1, 0, 0, buffer1.get(), kSize));
|
|
|
| std::string key2("Another key");
|
| ASSERT_EQ(net::OK, CreateEntry(key2, &entry2));
|
| @@ -1231,6 +1526,7 @@
|
| SetNewEviction();
|
| BackendInvalidEntryEnumeration();
|
| }
|
| +#endif // !defined(LEAK_SANITIZER)
|
|
|
| // Tests that if for some reason entries are modified close to existing cache
|
| // iterators, we don't generate fatal errors or reset the cache.
|
| @@ -1307,8 +1603,8 @@
|
| entry->Close();
|
| FlushQueueForTest();
|
|
|
| - AddDelay();
|
| - Time middle = Time::Now();
|
| + AddDelayForTest(kDelayToNextTimestamp);
|
| + Time middle = GetTime();
|
|
|
| ASSERT_EQ(net::OK, CreateEntry("third", &entry));
|
| entry->Close();
|
| @@ -1316,8 +1612,8 @@
|
| entry->Close();
|
| FlushQueueForTest();
|
|
|
| - AddDelay();
|
| - Time final = Time::Now();
|
| + AddDelayForTest(kDelayToNextTimestamp);
|
| + Time final = GetTime();
|
|
|
| ASSERT_EQ(4, cache_->GetEntryCount());
|
| EXPECT_EQ(net::OK, DoomEntriesSince(final));
|
| @@ -1334,80 +1630,27 @@
|
| BackendDoomRecent();
|
| }
|
|
|
| +TEST_F(DiskCacheBackendTest, V3DoomRecent) {
|
| + UseVersion3();
|
| + BackendDoomRecent();
|
| +}
|
| +
|
| TEST_F(DiskCacheBackendTest, NewEvictionDoomRecent) {
|
| SetNewEviction();
|
| BackendDoomRecent();
|
| }
|
|
|
| +TEST_F(DiskCacheBackendTest, V3NewEvictionDoomRecent) {
|
| + UseVersion3();
|
| + SetNewEviction();
|
| + BackendDoomRecent();
|
| +}
|
| +
|
| TEST_F(DiskCacheBackendTest, MemoryOnlyDoomRecent) {
|
| SetMemoryOnlyMode();
|
| BackendDoomRecent();
|
| }
|
|
|
| -void DiskCacheBackendTest::InitSparseCache(base::Time* doomed_start,
|
| - base::Time* doomed_end) {
|
| - InitCache();
|
| -
|
| - const int kSize = 50;
|
| - // This must be greater then MemEntryImpl::kMaxSparseEntrySize.
|
| - const int kOffset = 10 + 1024 * 1024;
|
| -
|
| - disk_cache::Entry* entry0 = NULL;
|
| - disk_cache::Entry* entry1 = NULL;
|
| - disk_cache::Entry* entry2 = NULL;
|
| -
|
| - scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
|
| - CacheTestFillBuffer(buffer->data(), kSize, false);
|
| -
|
| - ASSERT_EQ(net::OK, CreateEntry("zeroth", &entry0));
|
| - ASSERT_EQ(kSize, WriteSparseData(entry0, 0, buffer.get(), kSize));
|
| - ASSERT_EQ(kSize,
|
| - WriteSparseData(entry0, kOffset + kSize, buffer.get(), kSize));
|
| - entry0->Close();
|
| -
|
| - FlushQueueForTest();
|
| - AddDelay();
|
| - if (doomed_start)
|
| - *doomed_start = base::Time::Now();
|
| -
|
| - // Order in rankings list:
|
| - // first_part1, first_part2, second_part1, second_part2
|
| - ASSERT_EQ(net::OK, CreateEntry("first", &entry1));
|
| - ASSERT_EQ(kSize, WriteSparseData(entry1, 0, buffer.get(), kSize));
|
| - ASSERT_EQ(kSize,
|
| - WriteSparseData(entry1, kOffset + kSize, buffer.get(), kSize));
|
| - entry1->Close();
|
| -
|
| - ASSERT_EQ(net::OK, CreateEntry("second", &entry2));
|
| - ASSERT_EQ(kSize, WriteSparseData(entry2, 0, buffer.get(), kSize));
|
| - ASSERT_EQ(kSize,
|
| - WriteSparseData(entry2, kOffset + kSize, buffer.get(), kSize));
|
| - entry2->Close();
|
| -
|
| - FlushQueueForTest();
|
| - AddDelay();
|
| - if (doomed_end)
|
| - *doomed_end = base::Time::Now();
|
| -
|
| - // Order in rankings list:
|
| - // third_part1, fourth_part1, third_part2, fourth_part2
|
| - disk_cache::Entry* entry3 = NULL;
|
| - disk_cache::Entry* entry4 = NULL;
|
| - ASSERT_EQ(net::OK, CreateEntry("third", &entry3));
|
| - ASSERT_EQ(kSize, WriteSparseData(entry3, 0, buffer.get(), kSize));
|
| - ASSERT_EQ(net::OK, CreateEntry("fourth", &entry4));
|
| - ASSERT_EQ(kSize, WriteSparseData(entry4, 0, buffer.get(), kSize));
|
| - ASSERT_EQ(kSize,
|
| - WriteSparseData(entry3, kOffset + kSize, buffer.get(), kSize));
|
| - ASSERT_EQ(kSize,
|
| - WriteSparseData(entry4, kOffset + kSize, buffer.get(), kSize));
|
| - entry3->Close();
|
| - entry4->Close();
|
| -
|
| - FlushQueueForTest();
|
| - AddDelay();
|
| -}
|
| -
|
| TEST_F(DiskCacheBackendTest, MemoryOnlyDoomEntriesSinceSparse) {
|
| SetMemoryOnlyMode();
|
| base::Time start;
|
| @@ -1426,6 +1669,17 @@
|
| EXPECT_EQ(3, cache_->GetEntryCount());
|
| }
|
|
|
| +TEST_F(DiskCacheBackendTest, V3DoomEntriesSinceSparse) {
|
| + base::Time start;
|
| + UseVersion3();
|
| + InitSparseCache(&start, NULL);
|
| + DoomEntriesSince(start);
|
| + // NOTE: BackendImpl counts child entries in its GetEntryCount(), while
|
| +  // MemBackendImpl does not. That's why the expected value differs here from
|
| + // MemoryOnlyDoomEntriesSinceSparse.
|
| + EXPECT_EQ(3, cache_->GetEntryCount());
|
| +}
|
| +
|
| TEST_F(DiskCacheBackendTest, MemoryOnlyDoomAllSparse) {
|
| SetMemoryOnlyMode();
|
| InitSparseCache(NULL, NULL);
|
| @@ -1439,6 +1693,13 @@
|
| EXPECT_EQ(0, cache_->GetEntryCount());
|
| }
|
|
|
| +TEST_F(DiskCacheBackendTest, V3DoomAllSparse) {
|
| + UseVersion3();
|
| + InitSparseCache(NULL, NULL);
|
| + EXPECT_EQ(net::OK, DoomAllEntries());
|
| + EXPECT_EQ(0, cache_->GetEntryCount());
|
| +}
|
| +
|
| void DiskCacheBackendTest::BackendDoomBetween() {
|
| InitCache();
|
|
|
| @@ -1486,11 +1747,22 @@
|
| BackendDoomBetween();
|
| }
|
|
|
| +TEST_F(DiskCacheBackendTest, V3DoomBetween) {
|
| + UseVersion3();
|
| + BackendDoomBetween();
|
| +}
|
| +
|
| TEST_F(DiskCacheBackendTest, NewEvictionDoomBetween) {
|
| SetNewEviction();
|
| BackendDoomBetween();
|
| }
|
|
|
| +TEST_F(DiskCacheBackendTest, V3NewEvictionDoomBetween) {
|
| + UseVersion3();
|
| + SetNewEviction();
|
| + BackendDoomBetween();
|
| +}
|
| +
|
| TEST_F(DiskCacheBackendTest, MemoryOnlyDoomBetween) {
|
| SetMemoryOnlyMode();
|
| BackendDoomBetween();
|
| @@ -1521,6 +1793,19 @@
|
| EXPECT_EQ(3, cache_->GetEntryCount());
|
| }
|
|
|
| +TEST_F(DiskCacheBackendTest, V3DoomEntriesBetweenSparse) {
|
| + base::Time start, end;
|
| + UseVersion3();
|
| + InitSparseCache(&start, &end);
|
| + DoomEntriesBetween(start, end);
|
| + EXPECT_EQ(9, cache_->GetEntryCount());
|
| +
|
| + start = end;
|
| + end = base::Time::Now();
|
| + DoomEntriesBetween(start, end);
|
| + EXPECT_EQ(3, cache_->GetEntryCount());
|
| +}
|
| +
|
| void DiskCacheBackendTest::BackendTransaction(const std::string& name,
|
| int num_entries, bool load) {
|
| success_ = false;
|
| @@ -2401,8 +2686,8 @@
|
| const int kBufSize = 20000;
|
| scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kBufSize));
|
| memset(buf->data(), 0, kBufSize);
|
| - EXPECT_EQ(100, WriteData(entry2, 0, 0, buf, 100, false));
|
| - EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf, kBufSize, false));
|
| + EXPECT_EQ(100, WriteData(entry2, 0, 0, buf.get(), 100, false));
|
| + EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false));
|
|
|
| // This line should disable the cache but not delete it.
|
| EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry4));
|
| @@ -2410,13 +2695,13 @@
|
|
|
| EXPECT_NE(net::OK, CreateEntry("cache is disabled", &entry4));
|
|
|
| - EXPECT_EQ(100, ReadData(entry2, 0, 0, buf, 100));
|
| - EXPECT_EQ(100, WriteData(entry2, 0, 0, buf, 100, false));
|
| - EXPECT_EQ(100, WriteData(entry2, 1, 0, buf, 100, false));
|
| + EXPECT_EQ(100, ReadData(entry2, 0, 0, buf.get(), 100));
|
| + EXPECT_EQ(100, WriteData(entry2, 0, 0, buf.get(), 100, false));
|
| + EXPECT_EQ(100, WriteData(entry2, 1, 0, buf.get(), 100, false));
|
|
|
| - EXPECT_EQ(kBufSize, ReadData(entry3, 0, 0, buf, kBufSize));
|
| - EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf, kBufSize, false));
|
| - EXPECT_EQ(kBufSize, WriteData(entry3, 1, 0, buf, kBufSize, false));
|
| + EXPECT_EQ(kBufSize, ReadData(entry3, 0, 0, buf.get(), kBufSize));
|
| + EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false));
|
| + EXPECT_EQ(kBufSize, WriteData(entry3, 1, 0, buf.get(), kBufSize, false));
|
|
|
| std::string key = entry2->GetKey();
|
| EXPECT_EQ(sizeof(key2) - 1, key.size());
|
| @@ -2447,7 +2732,7 @@
|
| BackendDisable4();
|
| }
|
|
|
| -TEST_F(DiskCacheTest, Backend_UsageStats) {
|
| +TEST_F(DiskCacheTest, Backend_UsageStatsTimer) {
|
| MessageLoopHelper helper;
|
|
|
| ASSERT_TRUE(CleanupCacheDir());
|
| @@ -2464,6 +2749,35 @@
|
| helper.WaitUntilCacheIoFinished(1);
|
| }
|
|
|
| +TEST_F(DiskCacheBackendTest, Backend_UsageStats) {
|
| + InitCache();
|
| + disk_cache::Entry* entry;
|
| + ASSERT_EQ(net::OK, CreateEntry("key", &entry));
|
| + entry->Close();
|
| + FlushQueueForTest();
|
| +
|
| + disk_cache::StatsItems stats;
|
| + cache_->GetStats(&stats);
|
| + EXPECT_FALSE(stats.empty());
|
| +
|
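| +  // The single CreateEntry() above should be recorded as one "Create hit".
|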
| + disk_cache::StatsItems::value_type hits("Create hit", "0x1");
|
| + EXPECT_EQ(1, std::count(stats.begin(), stats.end(), hits));
|
| +
|
| + delete cache_;
|
| + cache_ = NULL;
|
| +
|
| + // Now open the cache and verify that the stats are still there.
|
| + DisableFirstCleanup();
|
| + InitCache();
|
| + EXPECT_EQ(1, cache_->GetEntryCount());
|
| +
|
| + stats.clear();
|
| + cache_->GetStats(&stats);
|
| + EXPECT_FALSE(stats.empty());
|
| +
|
| + EXPECT_EQ(1, std::count(stats.begin(), stats.end(), hits));
|
| +}
|
| +
|
| void DiskCacheBackendTest::BackendDoomAll() {
|
| InitCache();
|
|
|
| @@ -2514,11 +2828,22 @@
|
| BackendDoomAll();
|
| }
|
|
|
| +TEST_F(DiskCacheBackendTest, V3DoomAll) {
|
| + UseVersion3();
|
| + BackendDoomAll();
|
| +}
|
| +
|
| TEST_F(DiskCacheBackendTest, NewEvictionDoomAll) {
|
| SetNewEviction();
|
| BackendDoomAll();
|
| }
|
|
|
| +TEST_F(DiskCacheBackendTest, V3NewEvictionDoomAll) {
|
| + UseVersion3();
|
| + SetNewEviction();
|
| + BackendDoomAll();
|
| +}
|
| +
|
| TEST_F(DiskCacheBackendTest, MemoryOnlyDoomAll) {
|
| SetMemoryOnlyMode();
|
| BackendDoomAll();
|
| @@ -2662,7 +2987,7 @@
|
|
|
| // Make sure that we keep the total memory used by the internal buffers under
|
| // control.
|
| -TEST_F(DiskCacheBackendTest, TotalBuffersSize1) {
|
| +void DiskCacheBackendTest::BackendTotalBuffersSize1() {
|
| InitCache();
|
| std::string key("the first key");
|
| disk_cache::Entry* entry;
|
| @@ -2675,14 +3000,16 @@
|
| for (int i = 0; i < 10; i++) {
|
| SCOPED_TRACE(i);
|
| // Allocate 2MB for this entry.
|
| - EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer, kSize, true));
|
| - EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer, kSize, true));
|
| - EXPECT_EQ(kSize, WriteData(entry, 0, 1024 * 1024, buffer, kSize, false));
|
| - EXPECT_EQ(kSize, WriteData(entry, 1, 1024 * 1024, buffer, kSize, false));
|
| + EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, true));
|
| + EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer.get(), kSize, true));
|
| + EXPECT_EQ(kSize,
|
| + WriteData(entry, 0, 1024 * 1024, buffer.get(), kSize, false));
|
| + EXPECT_EQ(kSize,
|
| + WriteData(entry, 1, 1024 * 1024, buffer.get(), kSize, false));
|
|
|
| // Delete one of the buffers and truncate the other.
|
| - EXPECT_EQ(0, WriteData(entry, 0, 0, buffer, 0, true));
|
| - EXPECT_EQ(0, WriteData(entry, 1, 10, buffer, 0, true));
|
| + EXPECT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, true));
|
| + EXPECT_EQ(0, WriteData(entry, 1, 10, buffer.get(), 0, true));
|
|
|
| // Delete the second buffer, writing 10 bytes to disk.
|
| entry->Close();
|
| @@ -2690,35 +3017,53 @@
|
| }
|
|
|
| entry->Close();
|
| - EXPECT_EQ(0, cache_impl_->GetTotalBuffersSize());
|
| + EXPECT_EQ(0, GetTotalBuffersSize());
|
| }
|
|
|
| +TEST_F(DiskCacheBackendTest, TotalBuffersSize1) {
|
| + BackendTotalBuffersSize1();
|
| +}
|
| +
|
| +TEST_F(DiskCacheBackendTest, V3TotalBuffersSize1) {
|
| + UseVersion3();
|
| + BackendTotalBuffersSize1();
|
| +}
|
| +
|
| // This test assumes at least 150MB of system memory.
|
| -TEST_F(DiskCacheBackendTest, TotalBuffersSize2) {
|
| +void DiskCacheBackendTest::BackendTotalBuffersSize2() {
|
| InitCache();
|
|
|
| const int kOneMB = 1024 * 1024;
|
| - EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
|
| - EXPECT_EQ(kOneMB, cache_impl_->GetTotalBuffersSize());
|
| + EXPECT_TRUE(IsAllocAllowed(0, kOneMB));
|
| + EXPECT_EQ(kOneMB, GetTotalBuffersSize());
|
|
|
| - EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
|
| - EXPECT_EQ(kOneMB * 2, cache_impl_->GetTotalBuffersSize());
|
| + EXPECT_TRUE(IsAllocAllowed(0, kOneMB));
|
| + EXPECT_EQ(kOneMB * 2, GetTotalBuffersSize());
|
|
|
| - EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
|
| - EXPECT_EQ(kOneMB * 3, cache_impl_->GetTotalBuffersSize());
|
| + EXPECT_TRUE(IsAllocAllowed(0, kOneMB));
|
| + EXPECT_EQ(kOneMB * 3, GetTotalBuffersSize());
|
|
|
| - cache_impl_->BufferDeleted(kOneMB);
|
| - EXPECT_EQ(kOneMB * 2, cache_impl_->GetTotalBuffersSize());
|
| + BufferDeleted(kOneMB);
|
| + EXPECT_EQ(kOneMB * 2, GetTotalBuffersSize());
|
|
|
| // Check the upper limit.
|
| - EXPECT_FALSE(cache_impl_->IsAllocAllowed(0, 30 * kOneMB));
|
| + EXPECT_FALSE(IsAllocAllowed(0, 30 * kOneMB));
|
|
|
| for (int i = 0; i < 30; i++)
|
| - cache_impl_->IsAllocAllowed(0, kOneMB); // Ignore the result.
|
| + IsAllocAllowed(0, kOneMB); // Ignore the result.
|
|
|
| - EXPECT_FALSE(cache_impl_->IsAllocAllowed(0, kOneMB));
|
| + EXPECT_FALSE(IsAllocAllowed(0, kOneMB));
|
| }
|
|
|
| +TEST_F(DiskCacheBackendTest, TotalBuffersSize2) {
|
| + BackendTotalBuffersSize2();
|
| +}
|
| +
|
| +TEST_F(DiskCacheBackendTest, V3TotalBuffersSize2) {
|
| + UseVersion3();
|
| + BackendTotalBuffersSize2();
|
| +}
|
| +
|
| // Tests that sharing of external files works and we are able to delete the
|
| // files when we need to.
|
| TEST_F(DiskCacheBackendTest, FileSharing) {
|
| @@ -2759,7 +3104,7 @@
|
| EXPECT_TRUE(disk_cache::DeleteCacheFile(name));
|
| }
|
|
|
| -TEST_F(DiskCacheBackendTest, UpdateRankForExternalCacheHit) {
|
| +void DiskCacheBackendTest::BackendUpdateRankForExternalCacheHit() {
|
| InitCache();
|
|
|
| disk_cache::Entry* entry;
|
| @@ -2768,40 +3113,41 @@
|
| std::string key = base::StringPrintf("key%d", i);
|
| ASSERT_EQ(net::OK, CreateEntry(key, &entry));
|
| entry->Close();
|
| + AddDelayForTest(kDelayToNextTimestamp);
|
| }
|
| + FlushQueueForTest();
|
|
|
| // Ping the oldest entry.
|
| + SetTestMode();
|
| cache_->OnExternalCacheHit("key0");
|
| + FlushQueueForTest();
|
|
|
| TrimForTest(false);
|
|
|
| // Make sure the older key remains.
|
| - EXPECT_EQ(1, cache_->GetEntryCount());
|
| + EXPECT_NE(net::OK, OpenEntry("key1", &entry));
|
| ASSERT_EQ(net::OK, OpenEntry("key0", &entry));
|
| entry->Close();
|
| }
|
|
|
| +TEST_F(DiskCacheBackendTest, UpdateRankForExternalCacheHit) {
|
| + BackendUpdateRankForExternalCacheHit();
|
| +}
|
| +
|
| +TEST_F(DiskCacheBackendTest, V3UpdateRankForExternalCacheHit) {
|
| + UseVersion3();
|
| + BackendUpdateRankForExternalCacheHit();
|
| +}
|
| +
|
| TEST_F(DiskCacheBackendTest, ShaderCacheUpdateRankForExternalCacheHit) {
|
| SetCacheType(net::SHADER_CACHE);
|
| - InitCache();
|
| + BackendUpdateRankForExternalCacheHit();
|
| +}
|
|
|
| - disk_cache::Entry* entry;
|
| -
|
| - for (int i = 0; i < 2; ++i) {
|
| - std::string key = base::StringPrintf("key%d", i);
|
| - ASSERT_EQ(net::OK, CreateEntry(key, &entry));
|
| - entry->Close();
|
| - }
|
| -
|
| - // Ping the oldest entry.
|
| - cache_->OnExternalCacheHit("key0");
|
| -
|
| - TrimForTest(false);
|
| -
|
| - // Make sure the older key remains.
|
| - EXPECT_EQ(1, cache_->GetEntryCount());
|
| - ASSERT_EQ(net::OK, OpenEntry("key0", &entry));
|
| - entry->Close();
|
| +TEST_F(DiskCacheBackendTest, V3ShaderCacheUpdateRankForExternalCacheHit) {
|
| + UseVersion3();
|
| + SetCacheType(net::SHADER_CACHE);
|
| + BackendUpdateRankForExternalCacheHit();
|
| }
|
|
|
| void DiskCacheBackendTest::TracingBackendBasics() {
|
| @@ -2974,7 +3320,7 @@
|
| scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
|
| CacheTestFillBuffer(buffer->data(), kSize, false);
|
| ASSERT_EQ(net::OK, CreateEntry("key", &entry));
|
| - ASSERT_EQ(0, WriteData(entry, 0, 0, buffer, 0, false));
|
| + ASSERT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, false));
|
| entry->Close();
|
| delete cache_;
|
| cache_ = NULL;
|
| @@ -2984,8 +3330,10 @@
|
| ASSERT_TRUE(cache_thread.StartWithOptions(
|
| base::Thread::Options(MessageLoop::TYPE_IO, 0)));
|
| disk_cache::SimpleBackendImpl* simple_cache =
|
| - new disk_cache::SimpleBackendImpl(cache_path_, 0, net::DISK_CACHE,
|
| - cache_thread.message_loop_proxy(),
|
| + new disk_cache::SimpleBackendImpl(cache_path_,
|
| + 0,
|
| + net::DISK_CACHE,
|
| + cache_thread.message_loop_proxy().get(),
|
| NULL);
|
| net::TestCompletionCallback cb;
|
| int rv = simple_cache->Init(cb.callback());
|
| @@ -3005,7 +3353,7 @@
|
| scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
|
| CacheTestFillBuffer(buffer->data(), kSize, false);
|
| ASSERT_EQ(net::OK, CreateEntry("key", &entry));
|
| - ASSERT_EQ(0, WriteData(entry, 0, 0, buffer, 0, false));
|
| + ASSERT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, false));
|
| entry->Close();
|
| delete cache_;
|
| cache_ = NULL;
|
|
|