| Index: net/disk_cache/simple/simple_unittest.cc
|
| diff --git a/net/disk_cache/simple/simple_unittest.cc b/net/disk_cache/simple/simple_unittest.cc
|
| new file mode 100644
|
| index 0000000000000000000000000000000000000000..846f831e62b024cf1ba47330dcfcadd971b36943
|
| --- /dev/null
|
| +++ b/net/disk_cache/simple/simple_unittest.cc
|
| @@ -0,0 +1,1792 @@
|
| +// Copyright (c) 2014 The Chromium Authors. All rights reserved.
|
| +// Use of this source code is governed by a BSD-style license that can be
|
| +// found in the LICENSE file.
|
| +
|
| +#include <set>
|
| +#include <string>
|
| +
|
| +#include "base/file_util.h"
|
| +#include "base/platform_file.h"
|
| +#include "base/strings/string_util.h"
|
| +#include "base/strings/stringprintf.h"
|
| +#include "net/base/io_buffer.h"
|
| +#include "net/base/test_completion_callback.h"
|
| +#include "net/disk_cache/backend_tests.h"
|
| +#include "net/disk_cache/cache_util.h"
|
| +#include "net/disk_cache/disk_cache_test.h"
|
| +#include "net/disk_cache/disk_cache_test_util.h"
|
| +#include "net/disk_cache/entry_tests.h"
|
| +#include "net/disk_cache/simple/simple_backend_impl.h"
|
| +#include "net/disk_cache/simple/simple_index.h"
|
| +#include "net/disk_cache/simple/simple_entry_format.h"
|
| +#include "net/disk_cache/simple/simple_entry_impl.h"
|
| +#include "net/disk_cache/simple/simple_synchronous_entry.h"
|
| +#include "net/disk_cache/simple/simple_test_util.h"
|
| +#include "net/disk_cache/simple/simple_util.h"
|
| +#include "base/single_thread_task_runner.h"
|
| +#include "base/memory/scoped_ptr.h"
|
| +
|
| +namespace disk_cache {
|
| +
|
| +namespace {
|
| +
|
| +// Extra data handed to CreateBackend() that lets a test request that backend
|
| +// creation not block on the simple cache index becoming ready.
|
| +class SimpleCacheCreateBackendExtraData
|
| + : public BackendTestTraits::CreateBackendExtraData {
|
| + public:
|
| +  SimpleCacheCreateBackendExtraData() : do_not_wait_for_index_(false) {}
|
| +
|
| +  bool do_not_wait_for_index() const { return do_not_wait_for_index_; }
|
| +  void set_do_not_wait_for_index(bool do_not_wait_for_index) {
|
| +    do_not_wait_for_index_ = do_not_wait_for_index;
|
| +  }
|
| +
|
| + private:
|
| +  // Defaults to false, i.e. by default creation waits for the index.
|
| +  bool do_not_wait_for_index_;
|
| +};
|
| +
|
| +// Traits that build a SimpleBackendImpl for the parameterized disk-cache
|
| +// tests. Two flavors are exposed: DiskCache() (net::DISK_CACHE) and
|
| +// AppCache() (net::APP_CACHE).
|
| +class SimpleCacheBackendTraits : public BackendTestTraits {
|
| + public:
|
| + virtual ~SimpleCacheBackendTraits() {}
|
| +
|
| + // Creates and initializes a SimpleBackendImpl rooted at |cache_path|.
|
| + // Returns NULL on initialization failure. Unless |extra_data| asks
|
| + // otherwise, also blocks until the cache index has loaded.
|
| + virtual Backend* CreateBackend(
|
| + const CreateBackendExtraData* extra_data,
|
| + const base::FilePath& cache_path,
|
| + int max_size,
|
| + base::MessageLoopProxy* task_runner) const OVERRIDE {
|
| + const SimpleCacheCreateBackendExtraData* simple_cache_extra_data =
|
| + static_cast<const SimpleCacheCreateBackendExtraData*>(extra_data);
|
| +
|
| + net::TestCompletionCallback cb;
|
| + scoped_ptr<SimpleBackendImpl> simple_backend(
|
| + new SimpleBackendImpl(
|
| + cache_path, max_size, type_,
|
| + make_scoped_refptr(task_runner).get(), NULL));
|
| + int rv = simple_backend->Init(cb.callback());
|
| + if (cb.GetResult(rv) != net::OK)
|
| + return NULL;
|
| + // NOTE(review): when |extra_data| is NULL this skips waiting for the
|
| + // index entirely, which contradicts the "wait by default" semantics of
|
| + // SimpleCacheCreateBackendExtraData -- confirm whether callers ever
|
| + // pass NULL and expect the index to be ready.
|
| + if (simple_cache_extra_data &&
|
| + !simple_cache_extra_data->do_not_wait_for_index()) {
|
| + net::TestCompletionCallback wait_for_index_cb;
|
| + rv = simple_backend->index()->ExecuteWhenReady(
|
| + wait_for_index_cb.callback());
|
| + if (wait_for_index_cb.GetResult(rv) != net::OK)
|
| + return NULL;
|
| + }
|
| + return simple_backend.release();
|
| + }
|
| +
|
| + virtual bool UsesCacheThread() const OVERRIDE { return false; }
|
| + virtual bool ImplementsCouldBeSparse() const OVERRIDE { return false; }
|
| + virtual bool DoomedSparseEntriesIOWorks() const OVERRIDE { return false; }
|
| +
|
| + virtual void AddDelay() const OVERRIDE {
|
| + // The simple cache uses second resolution for many timeouts, so it's safest
|
| + // to advance by at least whole seconds before falling back into the normal
|
| + // disk cache epsilon advance.
|
| + const base::Time initial_time = base::Time::Now();
|
| + do {
|
| + base::PlatformThread::YieldCurrentThread();
|
| + } while (base::Time::Now() -
|
| + initial_time < base::TimeDelta::FromSeconds(1));
|
| +
|
| + BackendTestTraits::AddDelay();
|
| + }
|
| +
|
| + virtual bool SetMaxSize(Backend* backend, int size) const OVERRIDE {
|
| + return static_cast<SimpleBackendImpl*>(backend)->SetMaxSize(size);
|
| + }
|
| +
|
| + // The cache type the individual tests use to gate mode-specific behavior
|
| + // (e.g. optimistic operations run only for net::DISK_CACHE).
|
| + net::CacheType cache_type() const { return type_; }
|
| +
|
| + static const BackendTestTraits* DiskCache() {
|
| + static SimpleCacheBackendTraits traits(net::DISK_CACHE);
|
| + return &traits;
|
| + }
|
| +
|
| + static const BackendTestTraits* AppCache() {
|
| + // Fixed copy-paste bug: this previously constructed the traits with
|
| + // net::DISK_CACHE, so both parameterizations reported DISK_CACHE and
|
| + // every TEST_DISABLED_IF(cache_type() != net::APP_CACHE) test in this
|
| + // file was silently never exercised.
|
| + static SimpleCacheBackendTraits traits(net::APP_CACHE);
|
| + return &traits;
|
| + }
|
| +
|
| + private:
|
| + explicit SimpleCacheBackendTraits(net::CacheType type) : type_(type) {}
|
| +
|
| + const net::CacheType type_;
|
| +};
|
| +
|
| +// Run the shared entry/backend test suites against both simple cache flavors.
|
| +INSTANTIATE_TEST_CASE_P(
|
| + SimpleCache, DiskCacheEntryTest,
|
| + ::testing::Values(SimpleCacheBackendTraits::DiskCache(),
|
| + SimpleCacheBackendTraits::AppCache()));
|
| +
|
| +INSTANTIATE_TEST_CASE_P(
|
| + SimpleCache, DiskCacheBackendTest,
|
| + ::testing::Values(SimpleCacheBackendTraits::DiskCache(),
|
| + SimpleCacheBackendTraits::AppCache()));
|
| +
|
| +// Fixture for simple-cache-specific entry tests; exposes the cache type of
|
| +// the current parameterization so tests can gate mode-specific behavior.
|
| +class DiskCacheSimpleEntryTest : public DiskCacheTest {
|
| + protected:
|
| + net::CacheType cache_type() const { return simple_traits()->cache_type(); }
|
| +
|
| + private:
|
| + // Safe downcast: this fixture is only instantiated with
|
| + // SimpleCacheBackendTraits (see INSTANTIATE_TEST_CASE_P below).
|
| + const SimpleCacheBackendTraits* simple_traits() const {
|
| + return
|
| + static_cast<const SimpleCacheBackendTraits*>(DiskCacheTest::traits());
|
| + }
|
| +};
|
| +
|
| +INSTANTIATE_TEST_CASE_P(
|
| + SimpleCache, DiskCacheSimpleEntryTest,
|
| + ::testing::Values(SimpleCacheBackendTraits::DiskCache(),
|
| + SimpleCacheBackendTraits::AppCache()));
|
| +
|
| +// Fixture for simple-cache-specific backend tests; no extra state needed.
|
| +class DiskCacheSimpleBackendTest : public DiskCacheTest {};
|
| +
|
| +INSTANTIATE_TEST_CASE_P(
|
| + SimpleCache, DiskCacheSimpleBackendTest,
|
| + ::testing::Values(SimpleCacheBackendTraits::DiskCache(),
|
| + SimpleCacheBackendTraits::AppCache()));
|
| +
|
| +// Individual unit tests and helper functions after here.
|
| +
|
| +// Creates an entry under |key|, writes a small payload to stream 1, then
|
| +// flips one byte of that payload directly in the on-disk file so a later
|
| +// read hits a checksum mismatch. Returns false on any setup failure; on
|
| +// success *out_data_size receives the number of bytes written.
|
| +bool MakeBadChecksumEntry(const std::string& key,
|
| + DiskCacheTest* test,
|
| + int* out_data_size) {
|
| + disk_cache::Entry* entry = NULL;
|
| +
|
| + if (test->CreateEntry(key, &entry) != net::OK || !entry) {
|
| + LOG(ERROR) << "Could not create entry";
|
| + return false;
|
| + }
|
| +
|
| + const char data[] = "this is very good data";
|
| + // arraysize() includes the trailing NUL, so the NUL is written too.
|
| + const int kDataSize = arraysize(data);
|
| + scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kDataSize));
|
| + base::strlcpy(buffer->data(), data, kDataSize);
|
| +
|
| + EXPECT_EQ(kDataSize, test->WriteData(entry, 1, 0, buffer.get(), kDataSize, false));
|
| + entry->Close();
|
| + entry = NULL;
|
| +
|
| + // Corrupt the last byte of the data.
|
| + base::FilePath entry_file0_path = test->cache_path().AppendASCII(
|
| + disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
|
| + int flags = base::PLATFORM_FILE_WRITE | base::PLATFORM_FILE_OPEN;
|
| + base::PlatformFile entry_file0 =
|
| + base::CreatePlatformFile(entry_file0_path, flags, NULL, NULL);
|
| + if (entry_file0 == base::kInvalidPlatformFileValue)
|
| + return false;
|
| +
|
| + // Offset of the last non-NUL data byte: header, then key, then data;
|
| + // -2 skips the trailing NUL accounted for in kDataSize.
|
| + int64 file_offset =
|
| + sizeof(disk_cache::SimpleFileHeader) + key.size() + kDataSize - 2;
|
| + EXPECT_EQ(1, base::WritePlatformFile(entry_file0, file_offset, "X", 1));
|
| + if (!base::ClosePlatformFile(entry_file0))
|
| + return false;
|
| + *out_data_size = kDataSize;
|
| + return true;
|
| +}
|
| +
|
| +// Tests that the simple cache can detect entries that have bad data.
|
| +TEST_P(DiskCacheSimpleEntryTest, BadChecksum) {
|
| + InitCache();
|
| +
|
| + const char key[] = "the first key";
|
| + int size_unused;
|
| + ASSERT_TRUE(MakeBadChecksumEntry(key, this, &size_unused));
|
| +
|
| + disk_cache::Entry* entry = NULL;
|
| +
|
| + // Open the entry.
|
| + ASSERT_EQ(net::OK, OpenEntry(key, &entry));
|
| + ScopedEntryPtr entry_closer(entry);
|
| +
|
| + // Read the whole stream; the corrupted byte must surface as a
|
| + // checksum-mismatch error rather than bad data.
|
| + const int kReadBufferSize = 200;
|
| + EXPECT_GE(kReadBufferSize, entry->GetDataSize(1));
|
| + scoped_refptr<net::IOBuffer> read_buffer(new net::IOBuffer(kReadBufferSize));
|
| + EXPECT_EQ(net::ERR_CACHE_CHECKSUM_MISMATCH,
|
| + ReadData(entry, 1, 0, read_buffer.get(), kReadBufferSize));
|
| +}
|
| +
|
| +// Tests that an entry that has had an IO error occur can still be Doomed().
|
| +TEST_P(DiskCacheSimpleEntryTest, ErrorThenDoom) {
|
| + InitCache();
|
| +
|
| + const char key[] = "the first key";
|
| + int size_unused;
|
| + ASSERT_TRUE(MakeBadChecksumEntry(key, this, &size_unused));
|
| +
|
| + disk_cache::Entry* entry = NULL;
|
| +
|
| + // Open the entry, forcing an IO error.
|
| + ASSERT_EQ(net::OK, OpenEntry(key, &entry));
|
| + ScopedEntryPtr entry_closer(entry);
|
| +
|
| + const int kReadBufferSize = 200;
|
| + EXPECT_GE(kReadBufferSize, entry->GetDataSize(1));
|
| + scoped_refptr<net::IOBuffer> read_buffer(new net::IOBuffer(kReadBufferSize));
|
| + EXPECT_EQ(net::ERR_CACHE_CHECKSUM_MISMATCH,
|
| + ReadData(entry, 1, 0, read_buffer.get(), kReadBufferSize));
|
| +
|
| + entry->Doom(); // Should not crash.
|
| +}
|
| +
|
| +// Truncates the file at |file_path| to |length| bytes. Returns false if the
|
| +// file could not be opened or truncated.
|
| +bool TruncatePath(const base::FilePath& file_path, int64 length) {
|
| + const int flags = base::PLATFORM_FILE_WRITE | base::PLATFORM_FILE_OPEN;
|
| + base::PlatformFile file =
|
| + base::CreatePlatformFile(file_path, flags, NULL, NULL);
|
| + if (base::kInvalidPlatformFileValue == file)
|
| + return false;
|
| + // Close unconditionally so the handle is not leaked on truncate failure.
|
| + const bool result = base::TruncatePlatformFile(file, length);
|
| + base::ClosePlatformFile(file);
|
| + return result;
|
| +}
|
| +
|
| +// Tests that an entry file too short to contain its EOF record is rejected
|
| +// at open time.
|
| +TEST_P(DiskCacheSimpleEntryTest, NoEOF) {
|
| + InitCache();
|
| +
|
| + const char key[] = "the first key";
|
| +
|
| + disk_cache::Entry* entry = NULL;
|
| + ASSERT_EQ(net::OK, CreateEntry(key, &entry));
|
| + disk_cache::Entry* null = NULL;
|
| + EXPECT_NE(null, entry);
|
| + entry->Close();
|
| + entry = NULL;
|
| +
|
| + // Force the entry to flush to disk, so subsequent platform file operations
|
| + // succeed.
|
| + ASSERT_EQ(net::OK, OpenEntry(key, &entry));
|
| + entry->Close();
|
| + entry = NULL;
|
| +
|
| + // Truncate the file such that the length isn't sufficient to have an EOF
|
| + // record.
|
| + int kTruncationBytes = -implicit_cast<int>(sizeof(disk_cache::SimpleFileEOF));
|
| + const base::FilePath entry_path = cache_path().AppendASCII(
|
| + disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
|
| + const int64 invalid_size =
|
| + disk_cache::simple_util::GetFileSizeFromKeyAndDataSize(key,
|
| + kTruncationBytes);
|
| + EXPECT_TRUE(TruncatePath(entry_path, invalid_size));
|
| + EXPECT_EQ(net::ERR_FAILED, OpenEntry(key, &entry));
|
| +}
|
| +
|
| +TEST_P(DiskCacheSimpleEntryTest, NonOptimisticOperationsBasic) {
|
| + // Test sequence:
|
| + // Create, Write, Read, Close.
|
| +
|
| + // APP_CACHE doesn't use optimistic operations, so this test runs only for
|
| + // that cache type.
|
| + TEST_DISABLED_IF(cache_type() != net::APP_CACHE);
|
| + InitCache();
|
| + disk_cache::Entry* const null_entry = NULL;
|
| +
|
| + disk_cache::Entry* entry = NULL;
|
| + EXPECT_EQ(net::OK, CreateEntry("my key", &entry));
|
| + ASSERT_NE(null_entry, entry);
|
| + ScopedEntryPtr entry_closer(entry);
|
| +
|
| + const int kBufferSize = 10;
|
| + scoped_refptr<net::IOBufferWithSize> write_buffer(
|
| + new net::IOBufferWithSize(kBufferSize));
|
| + CacheTestFillBuffer(write_buffer->data(), write_buffer->size(), false);
|
| + EXPECT_EQ(
|
| + write_buffer->size(),
|
| + WriteData(entry, 1, 0, write_buffer.get(), write_buffer->size(), false));
|
| +
|
| + scoped_refptr<net::IOBufferWithSize> read_buffer(
|
| + new net::IOBufferWithSize(kBufferSize));
|
| + EXPECT_EQ(read_buffer->size(),
|
| + ReadData(entry, 1, 0, read_buffer.get(), read_buffer->size()));
|
| +}
|
| +
|
| +TEST_P(DiskCacheSimpleEntryTest, NonOptimisticOperationsDontBlock) {
|
| + // Test sequence:
|
| + // Create, Write, Close.
|
| +
|
| + // APP_CACHE doesn't use optimistic operations.
|
| + TEST_DISABLED_IF(cache_type() != net::APP_CACHE);
|
| + InitCache();
|
| + disk_cache::Entry* const null_entry = NULL;
|
| +
|
| + MessageLoopHelper helper;
|
| + CallbackTest create_callback(&helper, false);
|
| +
|
| + int expected_callback_runs = 0;
|
| + const int kBufferSize = 10;
|
| + scoped_refptr<net::IOBufferWithSize> write_buffer(
|
| + new net::IOBufferWithSize(kBufferSize));
|
| +
|
| + disk_cache::Entry* entry = NULL;
|
| + EXPECT_EQ(net::OK, CreateEntry("my key", &entry));
|
| + ASSERT_NE(null_entry, entry);
|
| + ScopedEntryPtr entry_closer(entry);
|
| +
|
| + CacheTestFillBuffer(write_buffer->data(), write_buffer->size(), false);
|
| + CallbackTest write_callback(&helper, false);
|
| + // A non-optimistic write must return ERR_IO_PENDING (not block) and then
|
| + // complete via the callback.
|
| + int ret = entry->WriteData(
|
| + 1,
|
| + 0,
|
| + write_buffer.get(),
|
| + write_buffer->size(),
|
| + base::Bind(&CallbackTest::Run, base::Unretained(&write_callback)),
|
| + false);
|
| + ASSERT_EQ(net::ERR_IO_PENDING, ret);
|
| + helper.WaitUntilCacheIoFinished(++expected_callback_runs);
|
| +}
|
| +
|
| +TEST_P(DiskCacheSimpleEntryTest, NonOptimisticOperationsBasicsWithoutWaiting) {
|
| + // Test sequence:
|
| + // Create, Write, Read, Close.
|
| +
|
| + // APP_CACHE doesn't use optimistic operations.
|
| + TEST_DISABLED_IF(cache_type() != net::APP_CACHE);
|
| + InitCache();
|
| + disk_cache::Entry* const null_entry = NULL;
|
| + MessageLoopHelper helper;
|
| +
|
| + disk_cache::Entry* entry = NULL;
|
| + // Note that |entry| is only set once CreateEntry() completed which is why we
|
| + // have to wait (i.e. use the helper CreateEntry() function).
|
| + EXPECT_EQ(net::OK, CreateEntry("my key", &entry));
|
| + ASSERT_NE(null_entry, entry);
|
| + ScopedEntryPtr entry_closer(entry);
|
| +
|
| + const int kBufferSize = 10;
|
| + scoped_refptr<net::IOBufferWithSize> write_buffer(
|
| + new net::IOBufferWithSize(kBufferSize));
|
| + CacheTestFillBuffer(write_buffer->data(), write_buffer->size(), false);
|
| + CallbackTest write_callback(&helper, false);
|
| + // Issue the write and the read back-to-back without waiting in between;
|
| + // the entry must queue them and run both in order.
|
| + int ret = entry->WriteData(
|
| + 1,
|
| + 0,
|
| + write_buffer.get(),
|
| + write_buffer->size(),
|
| + base::Bind(&CallbackTest::Run, base::Unretained(&write_callback)),
|
| + false);
|
| + EXPECT_EQ(net::ERR_IO_PENDING, ret);
|
| + int expected_callback_runs = 1;
|
| +
|
| + scoped_refptr<net::IOBufferWithSize> read_buffer(
|
| + new net::IOBufferWithSize(kBufferSize));
|
| + CallbackTest read_callback(&helper, false);
|
| + ret = entry->ReadData(
|
| + 1,
|
| + 0,
|
| + read_buffer.get(),
|
| + read_buffer->size(),
|
| + base::Bind(&CallbackTest::Run, base::Unretained(&read_callback)));
|
| + EXPECT_EQ(net::ERR_IO_PENDING, ret);
|
| + ++expected_callback_runs;
|
| +
|
| + helper.WaitUntilCacheIoFinished(expected_callback_runs);
|
| + ASSERT_EQ(read_buffer->size(), write_buffer->size());
|
| + EXPECT_EQ(
|
| + 0,
|
| + memcmp(read_buffer->data(), write_buffer->data(), read_buffer->size()));
|
| +}
|
| +
|
| +TEST_P(DiskCacheSimpleEntryTest, Optimistic) {
|
| + // Test sequence:
|
| + // Create, Write, Read, Write, Read, Close.
|
| +
|
| + // Only net::DISK_CACHE runs optimistic operations.
|
| + TEST_DISABLED_IF(cache_type() != net::DISK_CACHE);
|
| + InitCache();
|
| + disk_cache::Entry* null = NULL;
|
| + const char key[] = "the first key";
|
| +
|
| + MessageLoopHelper helper;
|
| + CallbackTest callback1(&helper, false);
|
| + CallbackTest callback2(&helper, false);
|
| + CallbackTest callback3(&helper, false);
|
| + CallbackTest callback4(&helper, false);
|
| + CallbackTest callback5(&helper, false);
|
| +
|
| + // |expected| counts the callbacks we expect to run; only pending
|
| + // (non-optimistic) operations report through their callback.
|
| + int expected = 0;
|
| + const int kSize1 = 10;
|
| + const int kSize2 = 20;
|
| + scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
|
| + scoped_refptr<net::IOBuffer> buffer1_read(new net::IOBuffer(kSize1));
|
| + scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
|
| + scoped_refptr<net::IOBuffer> buffer2_read(new net::IOBuffer(kSize2));
|
| + CacheTestFillBuffer(buffer1->data(), kSize1, false);
|
| + CacheTestFillBuffer(buffer2->data(), kSize2, false);
|
| +
|
| + disk_cache::Entry* entry = NULL;
|
| + // Create is optimistic, must return OK.
|
| + ASSERT_EQ(net::OK,
|
| + cache()->CreateEntry(key, &entry,
|
| + base::Bind(&CallbackTest::Run,
|
| + base::Unretained(&callback1))));
|
| + EXPECT_NE(null, entry);
|
| + ScopedEntryPtr entry_closer(entry);
|
| +
|
| + // This write may or may not be optimistic (it depends if the previous
|
| + // optimistic create already finished by the time we call the write here).
|
| + int ret = entry->WriteData(
|
| + 1,
|
| + 0,
|
| + buffer1.get(),
|
| + kSize1,
|
| + base::Bind(&CallbackTest::Run, base::Unretained(&callback2)),
|
| + false);
|
| + EXPECT_TRUE(kSize1 == ret || net::ERR_IO_PENDING == ret);
|
| + if (net::ERR_IO_PENDING == ret)
|
| + expected++;
|
| +
|
| + // This Read must not be optimistic, since we don't support that yet.
|
| + EXPECT_EQ(net::ERR_IO_PENDING,
|
| + entry->ReadData(
|
| + 1,
|
| + 0,
|
| + buffer1_read.get(),
|
| + kSize1,
|
| + base::Bind(&CallbackTest::Run, base::Unretained(&callback3))));
|
| + expected++;
|
| + EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
|
| + EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read->data(), kSize1));
|
| +
|
| + // At this point after waiting, the pending operations queue on the entry
|
| + // should be empty, so the next Write operation must run as optimistic.
|
| + EXPECT_EQ(kSize2,
|
| + entry->WriteData(
|
| + 1,
|
| + 0,
|
| + buffer2.get(),
|
| + kSize2,
|
| + base::Bind(&CallbackTest::Run, base::Unretained(&callback4)),
|
| + false));
|
| +
|
| + // Lets do another read so we block until both the write and the read
|
| + // operation finishes and we can then test for HasOneRef() below.
|
| + EXPECT_EQ(net::ERR_IO_PENDING,
|
| + entry->ReadData(
|
| + 1,
|
| + 0,
|
| + buffer2_read.get(),
|
| + kSize2,
|
| + base::Bind(&CallbackTest::Run, base::Unretained(&callback5))));
|
| + expected++;
|
| +
|
| + EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
|
| + EXPECT_EQ(0, memcmp(buffer2->data(), buffer2_read->data(), kSize2));
|
| +
|
| + // Check that we are not leaking.
|
| + EXPECT_NE(entry, null);
|
| + EXPECT_TRUE(
|
| + static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
|
| +}
|
| +
|
| +TEST_P(DiskCacheSimpleEntryTest, Optimistic2) {
|
| + // Test sequence:
|
| + // Create, Open, Close, Close.
|
| +
|
| + // Only net::DISK_CACHE runs optimistic operations.
|
| + TEST_DISABLED_IF(cache_type() != net::DISK_CACHE);
|
| + InitCache();
|
| + disk_cache::Entry* null = NULL;
|
| + const char key[] = "the first key";
|
| +
|
| + MessageLoopHelper helper;
|
| + CallbackTest callback1(&helper, false);
|
| + CallbackTest callback2(&helper, false);
|
| +
|
| + disk_cache::Entry* entry = NULL;
|
| + ASSERT_EQ(net::OK,
|
| + cache()->CreateEntry(key, &entry,
|
| + base::Bind(&CallbackTest::Run,
|
| + base::Unretained(&callback1))));
|
| + EXPECT_NE(null, entry);
|
| + ScopedEntryPtr entry_closer(entry);
|
| +
|
| + // Opening the same key while the entry is live must hand back the same
|
| + // in-memory entry object.
|
| + disk_cache::Entry* entry2 = NULL;
|
| + ASSERT_EQ(net::ERR_IO_PENDING,
|
| + cache()->OpenEntry(key, &entry2,
|
| + base::Bind(&CallbackTest::Run,
|
| + base::Unretained(&callback2))));
|
| + ASSERT_TRUE(helper.WaitUntilCacheIoFinished(1));
|
| +
|
| + EXPECT_NE(null, entry2);
|
| + EXPECT_EQ(entry, entry2);
|
| +
|
| + // We have to call close twice, since we called create and open above.
|
| + entry->Close();
|
| +
|
| + // Check that we are not leaking.
|
| + EXPECT_TRUE(
|
| + static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
|
| +}
|
| +
|
| +TEST_P(DiskCacheSimpleEntryTest, Optimistic3) {
|
| + // Test sequence:
|
| + // Create, Close, Open, Close.
|
| +
|
| + // Only net::DISK_CACHE runs optimistic operations; the CreateEntry() below
|
| + // asserts the optimistic net::OK return, so gate this test like its
|
| + // sibling Optimistic* tests (the guard was missing here).
|
| + TEST_DISABLED_IF(cache_type() != net::DISK_CACHE);
|
| + InitCache();
|
| + disk_cache::Entry* null = NULL;
|
| + const char key[] = "the first key";
|
| +
|
| + disk_cache::Entry* entry = NULL;
|
| + ASSERT_EQ(net::OK,
|
| + cache()->CreateEntry(key, &entry, net::CompletionCallback()));
|
| + EXPECT_NE(null, entry);
|
| + entry->Close();
|
| +
|
| + net::TestCompletionCallback cb;
|
| + disk_cache::Entry* entry2 = NULL;
|
| + ASSERT_EQ(net::ERR_IO_PENDING,
|
| + cache()->OpenEntry(key, &entry2, cb.callback()));
|
| + ASSERT_EQ(net::OK, cb.GetResult(net::ERR_IO_PENDING));
|
| + ScopedEntryPtr entry_closer(entry2);
|
| +
|
| + // The Open must resurrect the same in-memory entry that the not-yet-fully
|
| + // closed Create produced.
|
| + EXPECT_NE(null, entry2);
|
| + EXPECT_EQ(entry, entry2);
|
| +
|
| + // Check that we are not leaking.
|
| + EXPECT_TRUE(
|
| + static_cast<disk_cache::SimpleEntryImpl*>(entry2)->HasOneRef());
|
| +}
|
| +
|
| +TEST_P(DiskCacheSimpleEntryTest, Optimistic4) {
|
| + // Test sequence:
|
| + // Create, Close, Write, Open, Open, Close, Write, Read, Close.
|
| +
|
| + // Only net::DISK_CACHE runs optimistic operations.
|
| + TEST_DISABLED_IF(cache_type() != net::DISK_CACHE);
|
| + InitCache();
|
| + disk_cache::Entry* null = NULL;
|
| + const char key[] = "the first key";
|
| +
|
| + net::TestCompletionCallback cb;
|
| + const int kSize1 = 10;
|
| + scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
|
| + CacheTestFillBuffer(buffer1->data(), kSize1, false);
|
| + disk_cache::Entry* entry = NULL;
|
| +
|
| + ASSERT_EQ(net::OK,
|
| + cache()->CreateEntry(key, &entry, net::CompletionCallback()));
|
| + EXPECT_NE(null, entry);
|
| + entry->Close();
|
| +
|
| + // Lets do a Write so we block until both the Close and the Write
|
| + // operation finishes. Write must fail since we are writing in a closed entry.
|
| + EXPECT_EQ(
|
| + net::ERR_IO_PENDING,
|
| + entry->WriteData(1, 0, buffer1.get(), kSize1, cb.callback(), false));
|
| + EXPECT_EQ(net::ERR_FAILED, cb.GetResult(net::ERR_IO_PENDING));
|
| +
|
| + // Finish running the pending tasks so that we fully complete the close
|
| + // operation and destroy the entry object.
|
| + base::MessageLoop::current()->RunUntilIdle();
|
| +
|
| + // At this point the |entry| must have been destroyed, and called
|
| + // RemoveSelfFromBackend().
|
| + disk_cache::Entry* entry2 = NULL;
|
| + ASSERT_EQ(net::ERR_IO_PENDING,
|
| + cache()->OpenEntry(key, &entry2, cb.callback()));
|
| + ASSERT_EQ(net::OK, cb.GetResult(net::ERR_IO_PENDING));
|
| + EXPECT_NE(null, entry2);
|
| +
|
| + // A second Open while the entry is live must return the same object.
|
| + disk_cache::Entry* entry3 = NULL;
|
| + ASSERT_EQ(net::ERR_IO_PENDING,
|
| + cache()->OpenEntry(key, &entry3, cb.callback()));
|
| + ASSERT_EQ(net::OK, cb.GetResult(net::ERR_IO_PENDING));
|
| + EXPECT_NE(null, entry3);
|
| + EXPECT_EQ(entry2, entry3);
|
| + entry3->Close();
|
| +
|
| + // The previous Close doesn't actually close the entry since we opened it
|
| + // twice, so the next Write operation must succeed and it must be able to
|
| + // perform it optimistically, since there is no operation running on this
|
| + // entry.
|
| + EXPECT_EQ(kSize1,
|
| + entry2->WriteData(
|
| + 1, 0, buffer1.get(), kSize1, net::CompletionCallback(), false));
|
| +
|
| + // Lets do another read so we block until both the write and the read
|
| + // operation finishes and we can then test for HasOneRef() below.
|
| + EXPECT_EQ(net::ERR_IO_PENDING,
|
| + entry2->ReadData(1, 0, buffer1.get(), kSize1, cb.callback()));
|
| + EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));
|
| +
|
| + // Check that we are not leaking.
|
| + EXPECT_TRUE(
|
| + static_cast<disk_cache::SimpleEntryImpl*>(entry2)->HasOneRef());
|
| + entry2->Close();
|
| +}
|
| +
|
| +TEST_P(DiskCacheSimpleEntryTest, Optimistic5) {
|
| + // Test sequence:
|
| + // Create, Doom, Write, Read, Close.
|
| +
|
| + // Only net::DISK_CACHE runs optimistic operations.
|
| + TEST_DISABLED_IF(cache_type() != net::DISK_CACHE);
|
| + InitCache();
|
| + disk_cache::Entry* null = NULL;
|
| + const char key[] = "the first key";
|
| +
|
| + net::TestCompletionCallback cb;
|
| + const int kSize1 = 10;
|
| + scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
|
| + CacheTestFillBuffer(buffer1->data(), kSize1, false);
|
| + disk_cache::Entry* entry = NULL;
|
| +
|
| + ASSERT_EQ(net::OK,
|
| + cache()->CreateEntry(key, &entry, net::CompletionCallback()));
|
| + EXPECT_NE(null, entry);
|
| + ScopedEntryPtr entry_closer(entry);
|
| + entry->Doom();
|
| +
|
| + // I/O on a doomed-but-open entry must still succeed.
|
| + EXPECT_EQ(
|
| + net::ERR_IO_PENDING,
|
| + entry->WriteData(1, 0, buffer1.get(), kSize1, cb.callback(), false));
|
| + EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));
|
| +
|
| + EXPECT_EQ(net::ERR_IO_PENDING,
|
| + entry->ReadData(1, 0, buffer1.get(), kSize1, cb.callback()));
|
| + EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));
|
| +
|
| + // Check that we are not leaking.
|
| + EXPECT_TRUE(
|
| + static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
|
| +}
|
| +
|
| +TEST_P(DiskCacheSimpleEntryTest, Optimistic6) {
|
| + // Test sequence:
|
| + // Create, Write, Doom, Doom, Read, Doom, Close.
|
| +
|
| + // Only net::DISK_CACHE runs optimistic operations.
|
| + TEST_DISABLED_IF(cache_type() != net::DISK_CACHE);
|
| + InitCache();
|
| + disk_cache::Entry* null = NULL;
|
| + const char key[] = "the first key";
|
| +
|
| + net::TestCompletionCallback cb;
|
| + const int kSize1 = 10;
|
| + scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
|
| + scoped_refptr<net::IOBuffer> buffer1_read(new net::IOBuffer(kSize1));
|
| + CacheTestFillBuffer(buffer1->data(), kSize1, false);
|
| + disk_cache::Entry* entry = NULL;
|
| +
|
| + ASSERT_EQ(net::OK,
|
| + cache()->CreateEntry(key, &entry, net::CompletionCallback()));
|
| + EXPECT_NE(null, entry);
|
| + ScopedEntryPtr entry_closer(entry);
|
| +
|
| + EXPECT_EQ(
|
| + net::ERR_IO_PENDING,
|
| + entry->WriteData(1, 0, buffer1.get(), kSize1, cb.callback(), false));
|
| + EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));
|
| +
|
| + // Repeated Doom calls on the same entry must be harmless no-ops.
|
| + entry->Doom();
|
| + entry->Doom();
|
| +
|
| + // This Read must not be optimistic, since we don't support that yet.
|
| + EXPECT_EQ(net::ERR_IO_PENDING,
|
| + entry->ReadData(1, 0, buffer1_read.get(), kSize1, cb.callback()));
|
| + EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));
|
| + EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read->data(), kSize1));
|
| +
|
| + entry->Doom();
|
| +}
|
| +
|
| +// Confirm that IO buffers are not referenced by the Simple Cache after a write
|
| +// completes.
|
| +TEST_P(DiskCacheSimpleEntryTest, OptimisticWriteReleases) {
|
| + // Only net::DISK_CACHE runs optimistic operations.
|
| + TEST_DISABLED_IF(cache_type() != net::DISK_CACHE);
|
| +
|
| + InitCache();
|
| +
|
| + const char key[] = "the first key";
|
| + disk_cache::Entry* entry = NULL;
|
| +
|
| + // First, an optimistic create.
|
| + ASSERT_EQ(net::OK,
|
| + cache()->CreateEntry(key, &entry, net::CompletionCallback()));
|
| + ASSERT_TRUE(entry);
|
| + ScopedEntryPtr entry_closer(entry);
|
| +
|
| + const int kWriteSize = 512;
|
| + scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kWriteSize));
|
| + EXPECT_TRUE(buffer1->HasOneRef());
|
| + CacheTestFillBuffer(buffer1->data(), kWriteSize, false);
|
| +
|
| + // An optimistic write happens only when there is an empty queue of pending
|
| + // operations. To ensure the queue is empty, we issue a write and wait until
|
| + // it completes.
|
| + EXPECT_EQ(kWriteSize,
|
| + WriteData(entry, 1, 0, buffer1.get(), kWriteSize, false));
|
| + EXPECT_TRUE(buffer1->HasOneRef());
|
| +
|
| + // Finally, we should perform an optimistic write and confirm that all
|
| + // references to the IO buffer have been released.
|
| + EXPECT_EQ(
|
| + kWriteSize,
|
| + entry->WriteData(
|
| + 1, 0, buffer1.get(), kWriteSize, net::CompletionCallback(), false));
|
| + EXPECT_TRUE(buffer1->HasOneRef());
|
| +}
|
| +
|
| +TEST_P(DiskCacheSimpleEntryTest, CreateDoomRace) {
|
| + // Test sequence:
|
| + // Create, Doom, Write, Close, Check files are not on disk anymore.
|
| + // NOTE(review): the ASSERT below relies on CreateEntry() returning net::OK
|
| + // synchronously (the optimistic path); unlike the Optimistic* tests this
|
| + // one is not gated on net::DISK_CACHE -- confirm this is intentional.
|
| + InitCache();
|
| + disk_cache::Entry* null = NULL;
|
| + const char key[] = "the first key";
|
| +
|
| + net::TestCompletionCallback cb;
|
| + const int kSize1 = 10;
|
| + scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
|
| + CacheTestFillBuffer(buffer1->data(), kSize1, false);
|
| + disk_cache::Entry* entry = NULL;
|
| +
|
| + ASSERT_EQ(net::OK,
|
| + cache()->CreateEntry(key, &entry, net::CompletionCallback()));
|
| + EXPECT_NE(null, entry);
|
| +
|
| + EXPECT_EQ(net::ERR_IO_PENDING, cache()->DoomEntry(key, cb.callback()));
|
| + EXPECT_EQ(net::OK, cb.GetResult(net::ERR_IO_PENDING));
|
| +
|
| + // Writing after the doom must still succeed on the open entry.
|
| + EXPECT_EQ(
|
| + kSize1,
|
| + entry->WriteData(0, 0, buffer1.get(), kSize1, cb.callback(), false));
|
| +
|
| + entry->Close();
|
| +
|
| + // Finish running the pending tasks so that we fully complete the close
|
| + // operation and destroy the entry object.
|
| + base::MessageLoop::current()->RunUntilIdle();
|
| +
|
| + // Once closed, the doomed entry's backing files must be gone.
|
| + for (int i = 0; i < disk_cache::kSimpleEntryFileCount; ++i) {
|
| + base::FilePath entry_file_path = cache_path().AppendASCII(
|
| + disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, i));
|
| + base::File::Info info;
|
| + EXPECT_FALSE(base::GetFileInfo(entry_file_path, &info));
|
| + }
|
| +}
|
| +
|
| +TEST_P(DiskCacheSimpleEntryTest, DoomCreateRace) {
|
| + // This test runs as APP_CACHE to make operations more synchronous. Test
|
| + // sequence:
|
| + // Create, Doom, Create.
|
| + TEST_DISABLED_IF(cache_type() != net::APP_CACHE);
|
| + InitCache();
|
| + disk_cache::Entry* null = NULL;
|
| + const char key[] = "the first key";
|
| +
|
| + net::TestCompletionCallback create_callback;
|
| +
|
| + disk_cache::Entry* entry1 = NULL;
|
| + ASSERT_EQ(net::OK,
|
| + create_callback.GetResult(
|
| + cache()->CreateEntry(key, &entry1, create_callback.callback())));
|
| + ScopedEntryPtr entry1_closer(entry1);
|
| + EXPECT_NE(null, entry1);
|
| +
|
| + // Issue the doom but do not wait for it before re-creating the key; the
|
| + // second create must succeed while the doom is still in flight.
|
| + net::TestCompletionCallback doom_callback;
|
| + EXPECT_EQ(net::ERR_IO_PENDING,
|
| + cache()->DoomEntry(key, doom_callback.callback()));
|
| +
|
| + disk_cache::Entry* entry2 = NULL;
|
| + ASSERT_EQ(net::OK,
|
| + create_callback.GetResult(
|
| + cache()->CreateEntry(key, &entry2, create_callback.callback())));
|
| + ScopedEntryPtr entry2_closer(entry2);
|
| + EXPECT_EQ(net::OK, doom_callback.GetResult(net::ERR_IO_PENDING));
|
| +}
|
| +
|
| +TEST_P(DiskCacheSimpleEntryTest, DoomDoom) {
|
| + // Test sequence:
|
| + // Create, Doom, Create, Doom (1st entry), Open.
|
| + InitCache();
|
| + disk_cache::Entry* null = NULL;
|
| +
|
| + const char key[] = "the first key";
|
| +
|
| + disk_cache::Entry* entry1 = NULL;
|
| + ASSERT_EQ(net::OK, CreateEntry(key, &entry1));
|
| + ScopedEntryPtr entry1_closer(entry1);
|
| + EXPECT_NE(null, entry1);
|
| +
|
| + EXPECT_EQ(net::OK, DoomEntry(key));
|
| +
|
| + disk_cache::Entry* entry2 = NULL;
|
| + ASSERT_EQ(net::OK, CreateEntry(key, &entry2));
|
| + ScopedEntryPtr entry2_closer(entry2);
|
| + EXPECT_NE(null, entry2);
|
| +
|
| + // Redundantly dooming entry1 should not delete entry2.
|
| + disk_cache::SimpleEntryImpl* simple_entry1 =
|
| + static_cast<disk_cache::SimpleEntryImpl*>(entry1);
|
| + net::TestCompletionCallback cb;
|
| + EXPECT_EQ(net::OK,
|
| + cb.GetResult(simple_entry1->DoomEntry(cb.callback())));
|
| +
|
| + // entry2 must still be openable after the redundant doom.
|
| + disk_cache::Entry* entry3 = NULL;
|
| + ASSERT_EQ(net::OK, OpenEntry(key, &entry3));
|
| + ScopedEntryPtr entry3_closer(entry3);
|
| + EXPECT_NE(null, entry3);
|
| +}
|
| +
|
| +TEST_P(DiskCacheSimpleEntryTest, DoomCreateDoom) {
|
| + // Test sequence:
|
| + // Create, Doom, Create, Doom.
|
| + InitCache();
|
| +
|
| + disk_cache::Entry* null = NULL;
|
| +
|
| + const char key[] = "the first key";
|
| +
|
| + disk_cache::Entry* entry1 = NULL;
|
| + ASSERT_EQ(net::OK, CreateEntry(key, &entry1));
|
| + ScopedEntryPtr entry1_closer(entry1);
|
| + EXPECT_NE(null, entry1);
|
| +
|
| + entry1->Doom();
|
| +
|
| + // Re-creating a just-doomed key must yield a fresh, usable entry.
|
| + disk_cache::Entry* entry2 = NULL;
|
| + ASSERT_EQ(net::OK, CreateEntry(key, &entry2));
|
| + ScopedEntryPtr entry2_closer(entry2);
|
| + EXPECT_NE(null, entry2);
|
| +
|
| + entry2->Doom();
|
| +
|
| + // This test passes if it doesn't crash.
|
| +}
|
| +
|
| +// Checks that an optimistic Create would fail later on a racing Open.
|
| +// NOTE(review): like CreateDoomRace, this asserts the optimistic
|
| +// CreateEntry() == net::OK without a DISK_CACHE guard -- confirm intended.
|
| +TEST_P(DiskCacheSimpleEntryTest, OptimisticCreateFailsOnOpen) {
|
| + InitCache();
|
| +
|
| + // Create a corrupt file in place of a future entry. Optimistic create should
|
| + // initially succeed, but realize later that creation failed.
|
| + const std::string key = "the key";
|
| + net::TestCompletionCallback cb;
|
| + disk_cache::Entry* entry = NULL;
|
| + disk_cache::Entry* entry2 = NULL;
|
| +
|
| + EXPECT_TRUE(disk_cache::simple_util::CreateCorruptFileForTests(
|
| + key, cache_path()));
|
| + EXPECT_EQ(net::OK, cache()->CreateEntry(key, &entry, cb.callback()));
|
| + ASSERT_TRUE(entry);
|
| + ScopedEntryPtr entry_closer(entry);
|
| + ASSERT_NE(net::OK, OpenEntry(key, &entry2));
|
| +
|
| + // Check that we are not leaking.
|
| + EXPECT_TRUE(
|
| + static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
|
| +}
|
| +
|
// Tests that old entries are evicted while new entries remain in the index.
// This test relies on non-mandatory properties of the simple Cache Backend:
// LRU eviction, specific values of high-watermark and low-watermark etc.
// When changing the eviction algorithm, the test will have to be re-engineered.
TEST_P(DiskCacheSimpleEntryTest, EvictOldEntries) {
  const int kMaxSize = 200 * 1024;
  const int kWriteSize = kMaxSize / 10;
  const int kNumExtraEntries = 12;
  SetMaxSize(kMaxSize);
  InitCache();

  // Write one entry first; AddDelay() below makes it strictly older than the
  // extra entries, so LRU eviction should pick it.
  std::string key1("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key1, &entry));
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kWriteSize));
  CacheTestFillBuffer(buffer->data(), kWriteSize, false);
  EXPECT_EQ(kWriteSize,
            WriteData(entry, 1, 0, buffer.get(), kWriteSize, false));
  entry->Close();
  AddDelay();

  // Overfill the cache: 12 entries of kMaxSize/10 each exceed kMaxSize,
  // forcing eviction of the oldest entry.
  std::string key2("the key prefix");
  for (int i = 0; i < kNumExtraEntries; i++) {
    ASSERT_EQ(net::OK, CreateEntry(key2 + base::StringPrintf("%d", i), &entry));
    ScopedEntryPtr entry_closer(entry);
    EXPECT_EQ(kWriteSize,
              WriteData(entry, 1, 0, buffer.get(), kWriteSize, false));
  }

  // TODO(pasko): Find a way to wait for the eviction task(s) to finish by using
  // the internal knowledge about |SimpleBackendImpl|.
  ASSERT_NE(net::OK, OpenEntry(key1, &entry))
      << "Should have evicted the old entry";
  for (int i = 0; i < 2; i++) {
    int entry_no = kNumExtraEntries - i - 1;
    // Generally there is no guarantee that at this point the background
    // eviction is finished. We are testing the positive case, i.e. when the
    // eviction never reaches this entry, should be non-flaky.
    ASSERT_EQ(net::OK, OpenEntry(key2 + base::StringPrintf("%d", entry_no),
                                 &entry))
        << "Should not have evicted fresh entry " << entry_no;
    entry->Close();
  }
}
|
| +
|
// Tests that if a read and a following in-flight truncate are both in progress
// simultaneously that they both can occur successfully. See
// http://crbug.com/239223
TEST_P(DiskCacheSimpleEntryTest, InFlightTruncate) {
  InitCache();

  const char key[] = "the first key";

  // Populate the entry so the later read has data to return.
  const int kBufferSize = 1024;
  scoped_refptr<net::IOBuffer> write_buffer(new net::IOBuffer(kBufferSize));
  CacheTestFillBuffer(write_buffer->data(), kBufferSize, false);

  disk_cache::Entry* entry = NULL;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  EXPECT_EQ(kBufferSize,
            WriteData(entry, 1, 0, write_buffer.get(), kBufferSize, false));
  entry->Close();
  entry = NULL;

  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  ScopedEntryPtr entry_closer(entry);

  MessageLoopHelper helper;
  int expected = 0;

  // Make a short read.
  const int kReadBufferSize = 512;
  scoped_refptr<net::IOBuffer> read_buffer(new net::IOBuffer(kReadBufferSize));
  CallbackTest read_callback(&helper, false);
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->ReadData(1,
                            0,
                            read_buffer.get(),
                            kReadBufferSize,
                            base::Bind(&CallbackTest::Run,
                                       base::Unretained(&read_callback))));
  ++expected;

  // Truncate the entry to the length of that read, while the read is still
  // pending (both operations are in flight at once).
  scoped_refptr<net::IOBuffer>
      truncate_buffer(new net::IOBuffer(kReadBufferSize));
  CacheTestFillBuffer(truncate_buffer->data(), kReadBufferSize, false);
  CallbackTest truncate_callback(&helper, false);
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->WriteData(1,
                             0,
                             truncate_buffer.get(),
                             kReadBufferSize,
                             base::Bind(&CallbackTest::Run,
                                        base::Unretained(&truncate_callback)),
                             true));
  ++expected;

  // Wait for both the read and truncation to finish, and confirm that both
  // succeeded.
  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_EQ(kReadBufferSize, read_callback.last_result());
  EXPECT_EQ(kReadBufferSize, truncate_callback.last_result());
  // The read raced the truncate but must still observe the original data.
  EXPECT_EQ(0,
            memcmp(write_buffer->data(), read_buffer->data(), kReadBufferSize));
}
|
| +
|
// Tests that if a write and a read dependent on it are both in flight
// simultaneously that they both can complete successfully without erroneous
// early returns. See http://crbug.com/239223
TEST_P(DiskCacheSimpleEntryTest, InFlightRead) {
  InitCache();

  const char key[] = "the first key";
  disk_cache::Entry* entry = NULL;
  // Null callback: Create is optimistic, so the entry is usable immediately.
  ASSERT_EQ(net::OK,
            cache()->CreateEntry(key, &entry, net::CompletionCallback()));
  ScopedEntryPtr entry_closer(entry);

  const int kBufferSize = 1024;
  scoped_refptr<net::IOBuffer> write_buffer(new net::IOBuffer(kBufferSize));
  CacheTestFillBuffer(write_buffer->data(), kBufferSize, false);

  MessageLoopHelper helper;
  int expected = 0;

  // Queue the write...
  CallbackTest write_callback(&helper, false);
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->WriteData(1,
                            0,
                            write_buffer.get(),
                            kBufferSize,
                            base::Bind(&CallbackTest::Run,
                                       base::Unretained(&write_callback)),
                            true));
  ++expected;

  // ...then queue a read of the same range before the write completes.
  scoped_refptr<net::IOBuffer> read_buffer(new net::IOBuffer(kBufferSize));
  CallbackTest read_callback(&helper, false);
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->ReadData(1,
                            0,
                            read_buffer.get(),
                            kBufferSize,
                            base::Bind(&CallbackTest::Run,
                                       base::Unretained(&read_callback))));
  ++expected;

  // Both operations must complete and the read must see the written bytes.
  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_EQ(kBufferSize, write_callback.last_result());
  EXPECT_EQ(kBufferSize, read_callback.last_result());
  EXPECT_EQ(0, memcmp(write_buffer->data(), read_buffer->data(), kBufferSize));
}
|
| +
|
// With the index load deliberately stalled, an Open for an absent key must
// fail while a racing Create for the same key must succeed.
TEST_P(DiskCacheSimpleEntryTest, OpenCreateRaceWithNoIndex) {
  SimpleCacheCreateBackendExtraData extra_data;
  extra_data.set_do_not_wait_for_index(true);
  DisableIntegrityCheck();
  InitCacheWithExtraData(&extra_data);

  // Assume the index is not initialized, which is likely, since we are blocking
  // the IO thread from executing the index finalization step.
  disk_cache::Entry* entry1;
  net::TestCompletionCallback cb1;
  disk_cache::Entry* entry2;
  net::TestCompletionCallback cb2;
  // Issue both operations back-to-back so they race.
  int rv1 = cache()->OpenEntry("key", &entry1, cb1.callback());
  int rv2 = cache()->CreateEntry("key", &entry2, cb2.callback());

  EXPECT_EQ(net::ERR_FAILED, cb1.GetResult(rv1));
  ASSERT_EQ(net::OK, cb2.GetResult(rv2));
  entry2->Close();
}
|
| +
|
// Checks that reading two entries simultaneously does not discard a CRC check.
// TODO(pasko): make it work with Simple Cache.
TEST_P(DiskCacheSimpleEntryTest, DISABLED_SimpleCacheMultipleReadersCheckCRC) {
  InitCache();

  const char key[] = "key";

  int size;
  // The entry is created with a deliberately bad checksum; reading it to the
  // end should therefore fail the CRC verification.
  ASSERT_TRUE(MakeBadChecksumEntry(key, this, &size));

  scoped_refptr<net::IOBuffer> read_buffer1(new net::IOBuffer(size));
  scoped_refptr<net::IOBuffer> read_buffer2(new net::IOBuffer(size));

  // Advance the first reader a little.
  disk_cache::Entry* entry = NULL;
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(1, ReadData(entry, 0, 0, read_buffer1.get(), 1));

  // Make the second reader pass the point where the first one is, and close.
  disk_cache::Entry* entry2 = NULL;
  EXPECT_EQ(net::OK, OpenEntry(key, &entry2));
  EXPECT_EQ(1, ReadData(entry2, 0, 0, read_buffer2.get(), 1));
  EXPECT_EQ(1, ReadData(entry2, 0, 1, read_buffer2.get(), 1));
  entry2->Close();

  // Read the data till the end should produce an error: the CRC check must
  // not have been discarded by the overlapping second reader.
  EXPECT_GT(0, ReadData(entry, 0, 1, read_buffer1.get(), size));
  entry->Close();
  DisableIntegrityCheck();
}
|
| +
|
// Checking one more scenario of overlapped reading of a bad entry.
// Differs from the |SimpleCacheMultipleReadersCheckCRC| only by the order of
// last two reads.
TEST_P(DiskCacheSimpleEntryTest, MultipleReadersCheckCRC2) {
  InitCache();

  const char key[] = "key";
  int size;
  // Entry is created with a corrupt checksum; full reads must fail.
  ASSERT_TRUE(MakeBadChecksumEntry(key, this, &size));

  scoped_refptr<net::IOBuffer> read_buffer1(new net::IOBuffer(size));
  scoped_refptr<net::IOBuffer> read_buffer2(new net::IOBuffer(size));

  // Advance the first reader a little.
  disk_cache::Entry* entry = NULL;
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  ScopedEntryPtr entry_closer(entry);
  EXPECT_EQ(1, ReadData(entry, 1, 0, read_buffer1.get(), 1));

  // Advance the 2nd reader by the same amount.
  disk_cache::Entry* entry2 = NULL;
  EXPECT_EQ(net::OK, OpenEntry(key, &entry2));
  ScopedEntryPtr entry2_closer(entry2);
  EXPECT_EQ(1, ReadData(entry2, 1, 0, read_buffer2.get(), 1));

  // Continue reading 1st.
  EXPECT_GT(0, ReadData(entry, 1, 1, read_buffer1.get(), size));

  // This read should fail as well because we have previous read failures.
  EXPECT_GT(0, ReadData(entry2, 1, 1, read_buffer2.get(), 1));
  DisableIntegrityCheck();
}
|
| +
|
// Test if we can sequentially read each subset of the data until all the data
// is read, then the CRC is calculated correctly and the reads are successful.
TEST_P(DiskCacheSimpleEntryTest, ReadCombineCRC) {
  // Test sequence:
  // Create, Write, Read (first half of data), Read (second half of data),
  // Close.
  InitCache();
  disk_cache::Entry* null = NULL;
  const char key[] = "the first key";

  const int kHalfSize = 200;
  const int kSize = 2 * kHalfSize;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer1->data(), kSize, false);
  disk_cache::Entry* entry = NULL;

  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_NE(null, entry);

  EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer1.get(), kSize, false));
  entry->Close();

  disk_cache::Entry* entry2 = NULL;
  ASSERT_EQ(net::OK, OpenEntry(key, &entry2));
  // NOTE(review): asserts Open returns the same in-memory entry object as the
  // original (still-referenced) entry -- presumably a SimpleBackend guarantee;
  // confirm against SimpleBackendImpl.
  EXPECT_EQ(entry, entry2);

  // Read the first half of the data.
  int offset = 0;
  int buf_len = kHalfSize;
  scoped_refptr<net::IOBuffer> buffer1_read1(new net::IOBuffer(buf_len));
  EXPECT_EQ(buf_len, ReadData(entry2, 1, offset, buffer1_read1.get(), buf_len));
  EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read1->data(), buf_len));

  // Read the second half of the data.
  offset = buf_len;
  buf_len = kHalfSize;
  scoped_refptr<net::IOBuffer> buffer1_read2(new net::IOBuffer(buf_len));
  EXPECT_EQ(buf_len, ReadData(entry2, 1, offset, buffer1_read2.get(), buf_len));
  char* buffer1_data = buffer1->data() + offset;
  EXPECT_EQ(0, memcmp(buffer1_data, buffer1_read2->data(), buf_len));

  // Check that we are not leaking.
  EXPECT_NE(entry, null);
  EXPECT_TRUE(
      static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
  entry->Close();
  entry = NULL;
}
|
| +
|
// Test if we can write the data not in sequence and read correctly. In
// this case the CRC will not be present.
TEST_P(DiskCacheSimpleEntryTest, NonSequentialWrite) {
  // Test sequence:
  // Create, Write (second half of data), Write (first half of data), Read,
  // Close.
  InitCache();
  disk_cache::Entry* null = NULL;
  const char key[] = "the first key";

  const int kHalfSize = 200;
  const int kSize = 2 * kHalfSize;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer1->data(), kSize, false);
  // buffer2 holds a copy of the second half of buffer1, to be written first.
  char* buffer1_data = buffer1->data() + kHalfSize;
  memcpy(buffer2->data(), buffer1_data, kHalfSize);
  disk_cache::Entry* entry = NULL;

  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_NE(null, entry);

  // Write the second half at offset kHalfSize before the first half at 0,
  // so the writes are out of order.
  int offset = kHalfSize;
  int buf_len = kHalfSize;

  EXPECT_EQ(buf_len,
            WriteData(entry, 0, offset, buffer2.get(), buf_len, false));
  offset = 0;
  buf_len = kHalfSize;
  EXPECT_EQ(buf_len,
            WriteData(entry, 0, offset, buffer1.get(), buf_len, false));
  entry->Close();

  disk_cache::Entry* entry2 = NULL;
  ASSERT_EQ(net::OK, OpenEntry(key, &entry2));
  // NOTE(review): asserts Open returns the same still-live entry object --
  // confirm against SimpleBackendImpl.
  EXPECT_EQ(entry, entry2);

  scoped_refptr<net::IOBuffer> buffer1_read1(new net::IOBuffer(kSize));
  EXPECT_EQ(kSize, ReadData(entry2, 0, 0, buffer1_read1.get(), kSize));
  EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read1->data(), kSize));

  // Check that we are not leaking.
  ASSERT_NE(entry, null);
  EXPECT_TRUE(
      static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
  entry->Close();
  entry = NULL;
}
|
| +
|
| +// Test that changing stream1 size does not affect stream0 (stream0 and stream1
|
| +// are stored in the same file in Simple Cache).
|
| +TEST_P(DiskCacheSimpleEntryTest, Stream1SizeChanges) {
|
| + InitCache();
|
| + disk_cache::Entry* entry = NULL;
|
| + const char key[] = "the key";
|
| + const int kSize = 100;
|
| + scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
|
| + scoped_refptr<net::IOBuffer> buffer_read(new net::IOBuffer(kSize));
|
| + CacheTestFillBuffer(buffer->data(), kSize, false);
|
| +
|
| + ASSERT_EQ(net::OK, CreateEntry(key, &entry));
|
| + EXPECT_TRUE(entry);
|
| +
|
| + // Write something into stream0.
|
| + EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
|
| + EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer_read.get(), kSize));
|
| + EXPECT_EQ(0, memcmp(buffer->data(), buffer_read->data(), kSize));
|
| + entry->Close();
|
| +
|
| + // Extend stream1.
|
| + ASSERT_EQ(net::OK, OpenEntry(key, &entry));
|
| + int stream1_size = 100;
|
| + EXPECT_EQ(0, WriteData(entry, 1, stream1_size, buffer.get(), 0, false));
|
| + EXPECT_EQ(stream1_size, entry->GetDataSize(1));
|
| + entry->Close();
|
| +
|
| + // Check that stream0 data has not been modified and that the EOF record for
|
| + // stream 0 contains a crc.
|
| + // The entry needs to be reopened before checking the crc: Open will perform
|
| + // the synchronization with the previous Close. This ensures the EOF records
|
| + // have been written to disk before we attempt to read them independently.
|
| + ASSERT_EQ(net::OK, OpenEntry(key, &entry));
|
| + base::FilePath entry_file0_path = cache_path().AppendASCII(
|
| + disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
|
| + int flags = base::PLATFORM_FILE_READ | base::PLATFORM_FILE_OPEN;
|
| + base::PlatformFile entry_file0 =
|
| + base::CreatePlatformFile(entry_file0_path, flags, NULL, NULL);
|
| + ASSERT_TRUE(entry_file0 != base::kInvalidPlatformFileValue);
|
| +
|
| + int data_size[disk_cache::kSimpleEntryStreamCount] = {kSize, stream1_size, 0};
|
| + int sparse_data_size = 0;
|
| + disk_cache::SimpleEntryStat entry_stat(
|
| + base::Time::Now(), base::Time::Now(), data_size, sparse_data_size);
|
| + int eof_offset = entry_stat.GetEOFOffsetInFile(key, 0);
|
| + disk_cache::SimpleFileEOF eof_record;
|
| + ASSERT_EQ(static_cast<int>(sizeof(eof_record)), base::ReadPlatformFile(
|
| + entry_file0,
|
| + eof_offset,
|
| + reinterpret_cast<char*>(&eof_record),
|
| + sizeof(eof_record)));
|
| + EXPECT_EQ(disk_cache::kSimpleFinalMagicNumber, eof_record.final_magic_number);
|
| + EXPECT_TRUE((eof_record.flags & disk_cache::SimpleFileEOF::FLAG_HAS_CRC32) ==
|
| + disk_cache::SimpleFileEOF::FLAG_HAS_CRC32);
|
| +
|
| + buffer_read = new net::IOBuffer(kSize);
|
| + EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer_read.get(), kSize));
|
| + EXPECT_EQ(0, memcmp(buffer->data(), buffer_read->data(), kSize));
|
| +
|
| + // Shrink stream1.
|
| + stream1_size = 50;
|
| + EXPECT_EQ(0, WriteData(entry, 1, stream1_size, buffer.get(), 0, true));
|
| + EXPECT_EQ(stream1_size, entry->GetDataSize(1));
|
| + entry->Close();
|
| +
|
| + // Check that stream0 data has not been modified.
|
| + buffer_read = new net::IOBuffer(kSize);
|
| + ASSERT_EQ(net::OK, OpenEntry(key, &entry));
|
| + EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer_read.get(), kSize));
|
| + EXPECT_EQ(0, memcmp(buffer->data(), buffer_read->data(), kSize));
|
| + entry->Close();
|
| + entry = NULL;
|
| +}
|
| +
|
// Test that writing within the range for which the crc has already been
// computed will properly invalidate the computed crc.
TEST_P(DiskCacheSimpleEntryTest, CRCRewrite) {
  // Test sequence:
  // Create, Write (big data), Write (small data in the middle), Close.
  // Open, Read (all), Close.
  InitCache();
  disk_cache::Entry* null = NULL;
  const char key[] = "the first key";

  const int kHalfSize = 200;
  const int kSize = 2 * kHalfSize;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kHalfSize));
  CacheTestFillBuffer(buffer1->data(), kSize, false);
  CacheTestFillBuffer(buffer2->data(), kHalfSize, false);

  disk_cache::Entry* entry = NULL;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_NE(null, entry);
  entry->Close();

  // Repeat the overwrite-then-verify sequence on every stream.
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    ASSERT_EQ(net::OK, OpenEntry(key, &entry));
    int offset = 0;
    int buf_len = kSize;

    EXPECT_EQ(buf_len,
              WriteData(entry, i, offset, buffer1.get(), buf_len, false));
    // Overwrite the second half; this falls inside the range whose crc was
    // already computed, so the crc must be invalidated.
    offset = kHalfSize;
    buf_len = kHalfSize;
    EXPECT_EQ(buf_len,
              WriteData(entry, i, offset, buffer2.get(), buf_len, false));
    entry->Close();

    ASSERT_EQ(net::OK, OpenEntry(key, &entry));

    // The read-back must reflect both writes: first half from buffer1,
    // second half from buffer2.
    scoped_refptr<net::IOBuffer> buffer1_read1(new net::IOBuffer(kSize));
    EXPECT_EQ(kSize, ReadData(entry, i, 0, buffer1_read1.get(), kSize));
    EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read1->data(), kHalfSize));
    EXPECT_EQ(
        0,
        memcmp(buffer2->data(), buffer1_read1->data() + kHalfSize, kHalfSize));

    entry->Close();
  }
}
|
| +
|
| +bool ThirdStreamFileExists(const base::FilePath& cache_path,
|
| + const char* key) {
|
| + int third_stream_file_index =
|
| + disk_cache::simple_util::GetFileIndexFromStreamIndex(2);
|
| + base::FilePath third_stream_file_path = cache_path.AppendASCII(
|
| + disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(
|
| + key, third_stream_file_index));
|
| + return PathExists(third_stream_file_path);
|
| +}
|
| +
|
// Check that a newly-created entry with no third-stream writes omits the
// third stream file.
TEST_P(DiskCacheSimpleEntryTest, OmittedThirdStream1) {
  InitCache();

  const char key[] = "key";

  disk_cache::Entry* entry;

  // Create entry and close without writing: third stream file should be
  // omitted, since the stream is empty.
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  entry->Close();
  EXPECT_FALSE(ThirdStreamFileExists(cache_path(), key));

  // Dooming must not create the file either.
  DoomEntry(key);
  EXPECT_FALSE(ThirdStreamFileExists(cache_path(), key));
}
|
| +
|
| +// Check that a newly-created entry with only a single zero-offset, zero-length
|
| +// write omits the third stream file.
|
| +TEST_P(DiskCacheSimpleEntryTest, OmittedThirdStream2) {
|
| + InitCache();
|
| +
|
| + const int kHalfSize = 8;
|
| + const int kSize = kHalfSize * 2;
|
| + const char key[] = "key";
|
| + scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
|
| + CacheTestFillBuffer(buffer->data(), kHalfSize, false);
|
| +
|
| + disk_cache::Entry* entry;
|
| +
|
| + // Create entry, write empty buffer to third stream, and close: third stream
|
| + // should still be omitted, since the entry ignores writes that don't modify
|
| + // data or change the length.
|
| + ASSERT_EQ(net::OK, CreateEntry(key, &entry));
|
| + EXPECT_EQ(0, WriteData(entry, 2, 0, buffer, 0, true));
|
| + entry->Close();
|
| + EXPECT_FALSE(ThirdStreamFileExists(cache_path(), key));
|
| +
|
| + DoomEntry(key);
|
| + EXPECT_FALSE(ThirdStreamFileExists(cache_path(), key));
|
| +}
|
| +
|
| +// Check that we can read back data written to the third stream.
|
| +TEST_P(DiskCacheSimpleEntryTest, OmittedThirdStream3) {
|
| + InitCache();
|
| +
|
| + const int kHalfSize = 8;
|
| + const int kSize = kHalfSize * 2;
|
| + const char key[] = "key";
|
| + scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
|
| + scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
|
| + CacheTestFillBuffer(buffer1->data(), kHalfSize, false);
|
| +
|
| + disk_cache::Entry* entry;
|
| +
|
| + // Create entry, write data to third stream, and close: third stream should
|
| + // not be omitted, since it contains data. Re-open entry and ensure there
|
| + // are that many bytes in the third stream.
|
| + ASSERT_EQ(net::OK, CreateEntry(key, &entry));
|
| + EXPECT_EQ(kHalfSize, WriteData(entry, 2, 0, buffer1, kHalfSize, true));
|
| + entry->Close();
|
| + EXPECT_TRUE(ThirdStreamFileExists(cache_path(), key));
|
| +
|
| + ASSERT_EQ(net::OK, OpenEntry(key, &entry));
|
| + EXPECT_EQ(kHalfSize, ReadData(entry, 2, 0, buffer2, kSize));
|
| + EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kHalfSize));
|
| + entry->Close();
|
| + EXPECT_TRUE(ThirdStreamFileExists(cache_path(), key));
|
| +
|
| + DoomEntry(key);
|
| + EXPECT_FALSE(ThirdStreamFileExists(cache_path(), key));
|
| +}
|
| +
|
| +// Check that we remove the third stream file upon opening an entry and finding
|
| +// the third stream empty. (This is the upgrade path for entries written
|
| +// before the third stream was optional.)
|
| +TEST_P(DiskCacheSimpleEntryTest, OmittedThirdStream4) {
|
| + InitCache();
|
| +
|
| + const int kHalfSize = 8;
|
| + const int kSize = kHalfSize * 2;
|
| + const char key[] = "key";
|
| + scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
|
| + scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
|
| + CacheTestFillBuffer(buffer1->data(), kHalfSize, false);
|
| +
|
| + disk_cache::Entry* entry;
|
| +
|
| + // Create entry, write data to third stream, truncate third stream back to
|
| + // empty, and close: third stream will not initially be omitted, since entry
|
| + // creates the file when the first significant write comes in, and only
|
| + // removes it on open if it is empty. Reopen, ensure that the file is
|
| + // deleted, and that there's no data in the third stream.
|
| + ASSERT_EQ(net::OK, CreateEntry(key, &entry));
|
| + EXPECT_EQ(kHalfSize, WriteData(entry, 2, 0, buffer1, kHalfSize, true));
|
| + EXPECT_EQ(0, WriteData(entry, 2, 0, buffer1, 0, true));
|
| + entry->Close();
|
| + EXPECT_TRUE(ThirdStreamFileExists(cache_path(), key));
|
| +
|
| + ASSERT_EQ(net::OK, OpenEntry(key, &entry));
|
| + EXPECT_FALSE(ThirdStreamFileExists(cache_path(), key));
|
| + EXPECT_EQ(0, ReadData(entry, 2, 0, buffer2, kSize));
|
| + entry->Close();
|
| + EXPECT_FALSE(ThirdStreamFileExists(cache_path(), key));
|
| +
|
| + DoomEntry(key);
|
| + EXPECT_FALSE(ThirdStreamFileExists(cache_path(), key));
|
| +}
|
| +
|
| +// Check that we don't accidentally create the third stream file once the entry
|
| +// has been doomed.
|
| +TEST_P(DiskCacheSimpleEntryTest, OmittedThirdStream5) {
|
| + InitCache();
|
| +
|
| + const int kHalfSize = 8;
|
| + const int kSize = kHalfSize * 2;
|
| + const char key[] = "key";
|
| + scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
|
| + CacheTestFillBuffer(buffer->data(), kHalfSize, false);
|
| +
|
| + disk_cache::Entry* entry;
|
| +
|
| + // Create entry, doom entry, write data to third stream, and close: third
|
| + // stream should not exist. (Note: We don't care if the write fails, just
|
| + // that it doesn't cause the file to be created on disk.)
|
| + ASSERT_EQ(net::OK, CreateEntry(key, &entry));
|
| + entry->Doom();
|
| + WriteData(entry, 2, 0, buffer, kHalfSize, true);
|
| + entry->Close();
|
| + EXPECT_FALSE(ThirdStreamFileExists(cache_path(), key));
|
| +}
|
| +
|
// There could be a race between Doom and an optimistic write.
TEST_P(DiskCacheSimpleEntryTest, DoomOptimisticWritesRace) {
  // Test sequence:
  // Create, first Write, second Write, Close.
  // Open, Close.
  InitCache();
  disk_cache::Entry* null = NULL;
  const char key[] = "the first key";

  const int kSize = 200;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer1->data(), kSize, false);
  CacheTestFillBuffer(buffer2->data(), kSize, false);

  // The race only happens on stream 1 and stream 2.
  // NOTE(review): the loop below nevertheless starts at stream 0 -- confirm
  // that covering stream 0 is intended.
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    // DoomAllEntries before each iteration so the Create below races a doom.
    ASSERT_EQ(net::OK, DoomAllEntries());
    disk_cache::Entry* entry = NULL;

    ASSERT_EQ(net::OK, CreateEntry(key, &entry));
    EXPECT_NE(null, entry);
    entry->Close();
    entry = NULL;

    ASSERT_EQ(net::OK, DoomAllEntries());
    ASSERT_EQ(net::OK, CreateEntry(key, &entry));
    EXPECT_NE(null, entry);

    int offset = 0;
    int buf_len = kSize;
    // This write should not be optimistic (since create is).
    EXPECT_EQ(buf_len,
              WriteData(entry, i, offset, buffer1.get(), buf_len, false));

    offset = kSize;
    // This write should be optimistic.
    EXPECT_EQ(buf_len,
              WriteData(entry, i, offset, buffer2.get(), buf_len, false));
    entry->Close();

    ASSERT_EQ(net::OK, OpenEntry(key, &entry));
    EXPECT_NE(null, entry);

    entry->Close();
    entry = NULL;
  }
}
|
| +
|
| +TEST_P(DiskCacheSimpleEntryTest, TruncateLargeSparseFile) {
|
| + const int kSize = 1024;
|
| +
|
| + // An entry is allowed sparse data 1/10 the size of the cache, so this size
|
| + // allows for one |kSize|-sized range plus overhead, but not two ranges.
|
| + SetMaxSize(kSize * 15);
|
| + InitCache();
|
| +
|
| + const char key[] = "key";
|
| + disk_cache::Entry* null = NULL;
|
| + disk_cache::Entry* entry;
|
| + ASSERT_EQ(net::OK, CreateEntry(key, &entry));
|
| + EXPECT_NE(null, entry);
|
| +
|
| + scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
|
| + CacheTestFillBuffer(buffer->data(), kSize, false);
|
| + net::TestCompletionCallback callback;
|
| + int ret;
|
| +
|
| + // Verify initial conditions.
|
| + ret = entry->ReadSparseData(0, buffer, kSize, callback.callback());
|
| + EXPECT_EQ(0, callback.GetResult(ret));
|
| +
|
| + ret = entry->ReadSparseData(kSize, buffer, kSize, callback.callback());
|
| + EXPECT_EQ(0, callback.GetResult(ret));
|
| +
|
| + // Write a range and make sure it reads back.
|
| + ret = entry->WriteSparseData(0, buffer, kSize, callback.callback());
|
| + EXPECT_EQ(kSize, callback.GetResult(ret));
|
| +
|
| + ret = entry->ReadSparseData(0, buffer, kSize, callback.callback());
|
| + EXPECT_EQ(kSize, callback.GetResult(ret));
|
| +
|
| + // Write another range and make sure it reads back.
|
| + ret = entry->WriteSparseData(kSize, buffer, kSize, callback.callback());
|
| + EXPECT_EQ(kSize, callback.GetResult(ret));
|
| +
|
| + ret = entry->ReadSparseData(kSize, buffer, kSize, callback.callback());
|
| + EXPECT_EQ(kSize, callback.GetResult(ret));
|
| +
|
| + // Make sure the first range was removed when the second was written.
|
| + ret = entry->ReadSparseData(0, buffer, kSize, callback.callback());
|
| + EXPECT_EQ(0, callback.GetResult(ret));
|
| +
|
| + entry->Close();
|
| +}
|
| +
|
// Verifies that when one of an entry's on-disk files is missing, Open fails
// and the remaining files of that entry are cleaned up.
TEST_P(DiskCacheSimpleBackendTest, SimpleCacheOpenMissingFile) {
  InitCache();

  const char* key = "the first key";
  disk_cache::Entry* entry = NULL;

  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  ASSERT_TRUE(entry != NULL);
  entry->Close();
  entry = NULL;

  // To make sure the file creation completed we need to call open again so that
  // we block until it actually created the files.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  ASSERT_TRUE(entry != NULL);
  entry->Close();
  entry = NULL;

  // Delete one of the files in the entry.
  base::FilePath to_delete_file = cache_path().AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
  EXPECT_TRUE(base::PathExists(to_delete_file));
  EXPECT_TRUE(disk_cache::DeleteCacheFile(to_delete_file));

  // Failing to open the entry should delete the rest of these files.
  ASSERT_EQ(net::ERR_FAILED, OpenEntry(key, &entry));

  // Confirm the rest of the files are gone.
  for (int i = 1; i < disk_cache::kSimpleEntryFileCount; ++i) {
    base::FilePath should_be_gone_file(cache_path().AppendASCII(
        disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, i)));
    EXPECT_FALSE(base::PathExists(should_be_gone_file));
  }
}
|
| +
|
// Verifies that an entry whose on-disk header is corrupt fails to open.
TEST_P(DiskCacheSimpleBackendTest, SimpleCacheOpenBadFile) {
  InitCache();

  const char* key = "the first key";
  disk_cache::Entry* entry = NULL;

  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  disk_cache::Entry* null = NULL;
  ASSERT_NE(null, entry);
  entry->Close();
  entry = NULL;

  // To make sure the file creation completed we need to call open again so that
  // we block until it actually created the files.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  ASSERT_NE(null, entry);
  entry->Close();
  entry = NULL;

  // Write an invalid header for stream 0 and stream 1.
  // (File index 0 holds both streams in the Simple Cache format.)
  base::FilePath entry_file1_path = cache_path().AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));

  disk_cache::SimpleFileHeader header;
  // A magic number that does not match kSimpleInitialMagicNumber.
  header.initial_magic_number = GG_UINT64_C(0xbadf00d);
  EXPECT_EQ(
      implicit_cast<int>(sizeof(header)),
      file_util::WriteFile(entry_file1_path, reinterpret_cast<char*>(&header),
                           sizeof(header)));
  ASSERT_EQ(net::ERR_FAILED, OpenEntry(key, &entry));
}
|
| +
|
| +// Creates entries based on random keys. Stores these keys in |key_pool|.
|
| +bool CreateSetOfRandomEntries(DiskCacheTest* test,
|
| + std::set<std::string>* key_pool) {
|
| + const int kNumEntries = 10;
|
| +
|
| + for (int i = 0; i < kNumEntries; ++i) {
|
| + std::string key = GenerateKey(true);
|
| + disk_cache::Entry* entry;
|
| + if (test->CreateEntry(key, &entry) != net::OK)
|
| + return false;
|
| + key_pool->insert(key);
|
| + entry->Close();
|
| + }
|
| +
|
| + return
|
| + key_pool->size() == implicit_cast<size_t>(test->cache()->GetEntryCount());
|
| +}
|
| +
|
| +// Performs iteration over the backend and checks that the keys of entries
|
| +// opened are in |keys_to_match|, then erases them. |*count| is incremented
|
| +// once per opened entry and is cumulative across calls: when |max_to_open|
|
| +// is non-negative, iteration stops as soon as |*count| reaches it.
|
| +// Otherwise, iteration will continue until OpenNextEntry stops returning
|
| +// net::OK. Returns false if OpenNextEntry ever reports success with a NULL
|
| +// entry.
|
| +bool EnumerateAndMatchKeys(DiskCacheTest* test,
|
| +                           int max_to_open,
|
| +                           void** iter,
|
| +                           std::set<std::string>* keys_to_match,
|
| +                           size_t* count) {
|
| +  Entry* entry = NULL;
|
| +
|
| +  while (test->OpenNextEntry(iter, &entry) == net::OK) {
|
| +    if (!entry)
|
| +      return false;
|
| +    // Each enumerated key must be expected, and seen at most once.
|
| +    EXPECT_EQ(1U, keys_to_match->erase(entry->GetKey()));
|
| +    entry->Close();
|
| +    ++(*count);
|
| +    if (max_to_open >= 0 && implicit_cast<int>(*count) >= max_to_open)
|
| +      break;
|
| +  }
|
| +
|
| +  return true;
|
| +}
|
| +
|
| +// Tests basic functionality of the SimpleBackend implementation of the
|
| +// enumeration API.
|
| +TEST_P(DiskCacheSimpleBackendTest, EnumerationBasics) {
|
| +  InitCache();
|
| +  std::set<std::string> key_pool;
|
| +  ASSERT_TRUE(CreateSetOfRandomEntries(this, &key_pool));
|
| +
|
| +  // Check that enumeration returns all entries.
|
| +  std::set<std::string> keys_to_match(key_pool);
|
| +  void* iter = NULL;
|
| +  size_t count = 0;
|
| +  // max_to_open == -1: iterate until OpenNextEntry stops returning net::OK.
|
| +  ASSERT_TRUE(EnumerateAndMatchKeys(this, -1, &iter, &keys_to_match, &count));
|
| +  cache()->EndEnumeration(&iter);
|
| +  EXPECT_EQ(key_pool.size(), count);
|
| +  EXPECT_TRUE(keys_to_match.empty());
|
| +
|
| +  // Check that opening entries does not affect enumeration.
|
| +  keys_to_match = key_pool;
|
| +  iter = NULL;
|
| +  count = 0;
|
| +  // Hold one entry open from before the enumeration starts...
|
| +  disk_cache::Entry* entry_opened_before;
|
| +  ASSERT_EQ(net::OK, OpenEntry(*(key_pool.begin()), &entry_opened_before));
|
| +  // ...enumerate roughly half the entries (|count| accumulates across both
|
| +  // EnumerateAndMatchKeys calls, so the second call finishes the walk)...
|
| +  ASSERT_TRUE(EnumerateAndMatchKeys(this, key_pool.size()/2,
|
| +                                    &iter,
|
| +                                    &keys_to_match,
|
| +                                    &count));
|
| +
|
| +  // ...then also open a not-yet-enumerated entry, and complete the
|
| +  // enumeration while both entries are still held open.
|
| +  disk_cache::Entry* entry_opened_middle;
|
| +  ASSERT_EQ(net::OK,
|
| +            OpenEntry(*(keys_to_match.begin()), &entry_opened_middle));
|
| +  ASSERT_TRUE(EnumerateAndMatchKeys(this, -1, &iter, &keys_to_match, &count));
|
| +  cache()->EndEnumeration(&iter);
|
| +  entry_opened_before->Close();
|
| +  entry_opened_middle->Close();
|
| +
|
| +  // Every entry was returned exactly once despite the concurrent opens.
|
| +  EXPECT_EQ(key_pool.size(), count);
|
| +  EXPECT_TRUE(keys_to_match.empty());
|
| +}
|
| +
|
| +// Dooming an entry in the middle of an enumeration must not disturb it:
|
| +// every remaining (non-doomed) entry is still returned exactly once.
|
| +TEST_P(DiskCacheSimpleBackendTest, EnumerationWhileDoomed) {
|
| +  InitCache();
|
| +  std::set<std::string> key_pool;
|
| +  ASSERT_TRUE(CreateSetOfRandomEntries(this, &key_pool));
|
| +
|
| +  // Walk roughly half of the entries, crossing each key off as it is seen.
|
| +  std::set<std::string> remaining_keys(key_pool);
|
| +  void* enum_iter = NULL;
|
| +  size_t entries_seen = 0;
|
| +  ASSERT_TRUE(EnumerateAndMatchKeys(this,
|
| +                                    key_pool.size()/2,
|
| +                                    &enum_iter,
|
| +                                    &remaining_keys,
|
| +                                    &entries_seen));
|
| +
|
| +  // Doom one of the not-yet-enumerated entries, then finish the walk.
|
| +  std::string doomed_key = *(remaining_keys.begin());
|
| +  DoomEntry(doomed_key);
|
| +  remaining_keys.erase(doomed_key);
|
| +  key_pool.erase(doomed_key);
|
| +  ASSERT_TRUE(EnumerateAndMatchKeys(
|
| +      this, -1, &enum_iter, &remaining_keys, &entries_seen));
|
| +  cache()->EndEnumeration(&enum_iter);
|
| +
|
| +  // All surviving entries were seen, each exactly once.
|
| +  EXPECT_EQ(key_pool.size(), entries_seen);
|
| +  EXPECT_TRUE(remaining_keys.empty());
|
| +}
|
| +
|
| +// Tests that enumerations are not affected by corrupt files.
|
| +TEST_P(DiskCacheSimpleBackendTest, EnumerationCorruption) {
|
| +  InitCache();
|
| +  std::set<std::string> key_pool;
|
| +  ASSERT_TRUE(CreateSetOfRandomEntries(this, &key_pool));
|
| +
|
| +  // Create a corrupt entry. The write/read sequence ensures that the entry will
|
| +  // have been created before corrupting the platform files, in the case of
|
| +  // optimistic operations.
|
| +  const std::string key = "the key";
|
| +  disk_cache::Entry* corrupted_entry;
|
| +
|
| +  ASSERT_EQ(net::OK, CreateEntry(key, &corrupted_entry));
|
| +  ASSERT_TRUE(corrupted_entry);
|
| +  const int kSize = 50;
|
| +  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
|
| +  CacheTestFillBuffer(buffer->data(), kSize, false);
|
| +  // The read after the write forces the asynchronous file operations to
|
| +  // complete before the on-disk files are clobbered below.
|
| +  ASSERT_EQ(kSize,
|
| +            WriteData(corrupted_entry, 0, 0, buffer.get(), kSize, false));
|
| +  ASSERT_EQ(kSize, ReadData(corrupted_entry, 0, 0, buffer.get(), kSize));
|
| +  corrupted_entry->Close();
|
| +
|
| +  // Overwrite the entry's files on disk with corrupt data; the index still
|
| +  // counts the corrupted entry, hence key_pool.size() + 1 below.
|
| +  EXPECT_TRUE(simple_util::CreateCorruptFileForTests(key, cache_path()));
|
| +  EXPECT_EQ(key_pool.size() + 1,
|
| +            implicit_cast<size_t>(cache()->GetEntryCount()));
|
| +
|
| +  // Check that enumeration returns all entries but the corrupt one.
|
| +  std::set<std::string> keys_to_match(key_pool);
|
| +  void* iter = NULL;
|
| +  size_t count = 0;
|
| +  ASSERT_TRUE(EnumerateAndMatchKeys(this, -1, &iter, &keys_to_match, &count));
|
| +  cache()->EndEnumeration(&iter);
|
| +
|
| +  // Only the healthy entries were enumerated; the corrupt one was skipped.
|
| +  EXPECT_EQ(key_pool.size(), count);
|
| +  EXPECT_TRUE(keys_to_match.empty());
|
| +}
|
| +
|
| +} // namespace
|
| +
|
| +} // namespace disk_cache
|
|
|