| Index: net/disk_cache/entry_tests.cc
|
| diff --git a/net/disk_cache/entry_tests.cc b/net/disk_cache/entry_tests.cc
|
| new file mode 100644
|
| index 0000000000000000000000000000000000000000..46a3a92248920431213d53abf181ddcb7a471d17
|
| --- /dev/null
|
| +++ b/net/disk_cache/entry_tests.cc
|
| @@ -0,0 +1,1553 @@
|
| +// Copyright (c) 2014 The Chromium Authors. All rights reserved.
|
| +// Use of this source code is governed by a BSD-style license that can be
|
| +// found in the LICENSE file.
|
| +
|
| +#include "net/disk_cache/entry_tests.h"
|
| +
|
| +#include <string>
|
| +
|
| +#include "base/strings/string_util.h"
|
| +#include "base/strings/stringprintf.h"
|
| +#include "net/base/io_buffer.h"
|
| +#include "net/base/net_errors.h"
|
| +#include "net/base/test_completion_callback.h"
|
| +#include "base/time/time.h"
|
| +#include "net/disk_cache/disk_cache.h"
|
| +#include "net/disk_cache/disk_cache_test.h"
|
| +#include "net/disk_cache/disk_cache_test_util.h"
|
| +
|
| +using base::Time;
|
| +
|
| +namespace disk_cache {
|
| +
|
| +namespace {
|
| +
|
| +TEST_P(DiskCacheEntryTest, InternalAsyncIO) {
|
| + InitCache();
|
| +
|
| + Entry* entry = NULL;
|
| + ASSERT_EQ(net::OK, CreateEntry("the first key", &entry));
|
| + ASSERT_TRUE(NULL != entry);
|
| +
|
| + // Avoid using internal buffers for the test. We have to write something to
|
| + // the entry and close it so that we flush the internal buffer to disk. After
|
| + // that, IO operations will be really hitting the disk. We don't care about
|
| + // the content, so just extending the entry is enough (all extensions zero-
|
| + // fill any holes).
|
| + EXPECT_EQ(0, WriteData(entry, 0, 15 * 1024, NULL, 0, false));
|
| + EXPECT_EQ(0, WriteData(entry, 1, 15 * 1024, NULL, 0, false));
|
| + entry->Close();
|
| + ASSERT_EQ(net::OK, OpenEntry("the first key", &entry));
|
| +
|
| + MessageLoopHelper helper;
|
| + // Let's verify that each IO goes to the right callback object.
|
| + CallbackTest callback1(&helper, false);
|
| + CallbackTest callback2(&helper, false);
|
| + CallbackTest callback3(&helper, false);
|
| + CallbackTest callback4(&helper, false);
|
| + CallbackTest callback5(&helper, false);
|
| + CallbackTest callback6(&helper, false);
|
| + CallbackTest callback7(&helper, false);
|
| + CallbackTest callback8(&helper, false);
|
| + CallbackTest callback9(&helper, false);
|
| + CallbackTest callback10(&helper, false);
|
| + CallbackTest callback11(&helper, false);
|
| + CallbackTest callback12(&helper, false);
|
| + CallbackTest callback13(&helper, false);
|
| +
|
| + const int kSize1 = 10;
|
| + const int kSize2 = 5000;
|
| + const int kSize3 = 10000;
|
| + scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
|
| + scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
|
| + scoped_refptr<net::IOBuffer> buffer3(new net::IOBuffer(kSize3));
|
| + CacheTestFillBuffer(buffer1->data(), kSize1, false);
|
| + CacheTestFillBuffer(buffer2->data(), kSize2, false);
|
| + CacheTestFillBuffer(buffer3->data(), kSize3, false);
|
| +
|
| + EXPECT_EQ(0,
|
| + entry->ReadData(
|
| + 0,
|
| + 15 * 1024,
|
| + buffer1.get(),
|
| + kSize1,
|
| + base::Bind(&CallbackTest::Run, base::Unretained(&callback1))));
|
| + base::strlcpy(buffer1->data(), "the data", kSize1);
|
| + int expected = 0;
|
| + int ret = entry->WriteData(
|
| + 0,
|
| + 0,
|
| + buffer1.get(),
|
| + kSize1,
|
| + base::Bind(&CallbackTest::Run, base::Unretained(&callback2)),
|
| + false);
|
| + EXPECT_TRUE(10 == ret || net::ERR_IO_PENDING == ret);
|
| + if (net::ERR_IO_PENDING == ret)
|
| + expected++;
|
| +
|
| + EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
|
| + memset(buffer2->data(), 0, kSize2);
|
| + ret = entry->ReadData(
|
| + 0,
|
| + 0,
|
| + buffer2.get(),
|
| + kSize1,
|
| + base::Bind(&CallbackTest::Run, base::Unretained(&callback3)));
|
| + EXPECT_TRUE(10 == ret || net::ERR_IO_PENDING == ret);
|
| + if (net::ERR_IO_PENDING == ret)
|
| + expected++;
|
| +
|
| + EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
|
| + EXPECT_STREQ("the data", buffer2->data());
|
| +
|
| + base::strlcpy(buffer2->data(), "The really big data goes here", kSize2);
|
| + ret = entry->WriteData(
|
| + 1,
|
| + 1500,
|
| + buffer2.get(),
|
| + kSize2,
|
| + base::Bind(&CallbackTest::Run, base::Unretained(&callback4)),
|
| + true);
|
| + EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
|
| + if (net::ERR_IO_PENDING == ret)
|
| + expected++;
|
| +
|
| + EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
|
| + memset(buffer3->data(), 0, kSize3);
|
| + ret = entry->ReadData(
|
| + 1,
|
| + 1511,
|
| + buffer3.get(),
|
| + kSize2,
|
| + base::Bind(&CallbackTest::Run, base::Unretained(&callback5)));
|
| + EXPECT_TRUE(4989 == ret || net::ERR_IO_PENDING == ret);
|
| + if (net::ERR_IO_PENDING == ret)
|
| + expected++;
|
| +
|
| + EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
|
| + EXPECT_STREQ("big data goes here", buffer3->data());
|
| + ret = entry->ReadData(
|
| + 1,
|
| + 0,
|
| + buffer2.get(),
|
| + kSize2,
|
| + base::Bind(&CallbackTest::Run, base::Unretained(&callback6)));
|
| + EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
|
| + if (net::ERR_IO_PENDING == ret)
|
| + expected++;
|
| +
|
| + memset(buffer3->data(), 0, kSize3);
|
| +
|
| + EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
|
| + EXPECT_EQ(0, memcmp(buffer2->data(), buffer3->data(), 1500));
|
| + ret = entry->ReadData(
|
| + 1,
|
| + 5000,
|
| + buffer2.get(),
|
| + kSize2,
|
| + base::Bind(&CallbackTest::Run, base::Unretained(&callback7)));
|
| + EXPECT_TRUE(1500 == ret || net::ERR_IO_PENDING == ret);
|
| + if (net::ERR_IO_PENDING == ret)
|
| + expected++;
|
| +
|
| + ret = entry->ReadData(
|
| + 1,
|
| + 0,
|
| + buffer3.get(),
|
| + kSize3,
|
| + base::Bind(&CallbackTest::Run, base::Unretained(&callback9)));
|
| + EXPECT_TRUE(6500 == ret || net::ERR_IO_PENDING == ret);
|
| + if (net::ERR_IO_PENDING == ret)
|
| + expected++;
|
| +
|
| + ret = entry->WriteData(
|
| + 1,
|
| + 0,
|
| + buffer3.get(),
|
| + 8192,
|
| + base::Bind(&CallbackTest::Run, base::Unretained(&callback10)),
|
| + true);
|
| + EXPECT_TRUE(8192 == ret || net::ERR_IO_PENDING == ret);
|
| + if (net::ERR_IO_PENDING == ret)
|
| + expected++;
|
| +
|
| + EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
|
| + ret = entry->ReadData(
|
| + 1,
|
| + 0,
|
| + buffer3.get(),
|
| + kSize3,
|
| + base::Bind(&CallbackTest::Run, base::Unretained(&callback11)));
|
| + EXPECT_TRUE(8192 == ret || net::ERR_IO_PENDING == ret);
|
| + if (net::ERR_IO_PENDING == ret)
|
| + expected++;
|
| +
|
| + EXPECT_EQ(8192, entry->GetDataSize(1));
|
| +
|
| + ret = entry->ReadData(
|
| + 0,
|
| + 0,
|
| + buffer1.get(),
|
| + kSize1,
|
| + base::Bind(&CallbackTest::Run, base::Unretained(&callback12)));
|
| + EXPECT_TRUE(10 == ret || net::ERR_IO_PENDING == ret);
|
| + if (net::ERR_IO_PENDING == ret)
|
| + expected++;
|
| +
|
| + ret = entry->ReadData(
|
| + 1,
|
| + 0,
|
| + buffer2.get(),
|
| + kSize2,
|
| + base::Bind(&CallbackTest::Run, base::Unretained(&callback13)));
|
| + EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
|
| + if (net::ERR_IO_PENDING == ret)
|
| + expected++;
|
| +
|
| + EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
|
| +
|
| + EXPECT_FALSE(helper.callback_reused_error());
|
| +
|
| + entry->Doom();
|
| + entry->Close();
|
| + FlushQueueForTest();
|
| + EXPECT_EQ(0, cache()->GetEntryCount());
|
| +}
|
| +
|
// Exercises asynchronous reads and writes on buffers large enough to be
// stored outside the block files, checking sync/async return values and that
// each operation completes through its own callback object.
TEST_P(DiskCacheEntryTest, ExternalAsyncIO) {
  InitCache();
  Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry("the first key", &entry));

  // Counts operations that returned ERR_IO_PENDING and therefore must
  // complete asynchronously before each wait below can succeed.
  int expected = 0;

  MessageLoopHelper helper;
  // Let's verify that each IO goes to the right callback object.
  CallbackTest callback1(&helper, false);
  CallbackTest callback2(&helper, false);
  CallbackTest callback3(&helper, false);
  CallbackTest callback4(&helper, false);
  CallbackTest callback5(&helper, false);
  CallbackTest callback6(&helper, false);
  CallbackTest callback7(&helper, false);
  CallbackTest callback8(&helper, false);
  CallbackTest callback9(&helper, false);

  const int kSize1 = 17000;
  const int kSize2 = 25000;
  const int kSize3 = 25000;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
  scoped_refptr<net::IOBuffer> buffer3(new net::IOBuffer(kSize3));
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  CacheTestFillBuffer(buffer2->data(), kSize2, false);
  CacheTestFillBuffer(buffer3->data(), kSize3, false);
  base::strlcpy(buffer1->data(), "the data", kSize1);
  int ret = entry->WriteData(
      0,
      0,
      buffer1.get(),
      kSize1,
      base::Bind(&CallbackTest::Run, base::Unretained(&callback1)),
      false);
  EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));

  // Only the first kSize1 bytes of buffer2 are read into below, so clearing
  // just that much is enough.
  memset(buffer2->data(), 0, kSize1);
  ret = entry->ReadData(
      0,
      0,
      buffer2.get(),
      kSize1,
      base::Bind(&CallbackTest::Run, base::Unretained(&callback2)));
  EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_STREQ("the data", buffer2->data());

  base::strlcpy(buffer2->data(), "The really big data goes here", kSize2);
  ret = entry->WriteData(
      1,
      10000,
      buffer2.get(),
      kSize2,
      base::Bind(&CallbackTest::Run, base::Unretained(&callback3)),
      false);
  EXPECT_TRUE(25000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));

  memset(buffer3->data(), 0, kSize3);
  // Offset 10011 skips the 11 characters of "The really ", so the read
  // starts at "big data goes here"; 24989 = 25000 - 11 bytes remain.
  ret = entry->ReadData(
      1,
      10011,
      buffer3.get(),
      kSize3,
      base::Bind(&CallbackTest::Run, base::Unretained(&callback4)));
  EXPECT_TRUE(24989 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_STREQ("big data goes here", buffer3->data());
  ret = entry->ReadData(
      1,
      0,
      buffer2.get(),
      kSize2,
      base::Bind(&CallbackTest::Run, base::Unretained(&callback5)));
  EXPECT_TRUE(25000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  memset(buffer3->data(), 0, kSize3);
  // Bytes 0..9999 of stream 1 were never written, so they read back as zeros.
  EXPECT_EQ(0, memcmp(buffer2->data(), buffer3->data(), 10000));
  // Read straddling the end of the stream (size 35000): only 5000 bytes left.
  ret = entry->ReadData(
      1,
      30000,
      buffer2.get(),
      kSize2,
      base::Bind(&CallbackTest::Run, base::Unretained(&callback6)));
  EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  // Reading at/past the end of the stream returns 0 bytes synchronously.
  EXPECT_EQ(0,
            entry->ReadData(
                1,
                35000,
                buffer2.get(),
                kSize2,
                base::Bind(&CallbackTest::Run, base::Unretained(&callback7))));
  ret = entry->ReadData(
      1,
      0,
      buffer1.get(),
      kSize1,
      base::Bind(&CallbackTest::Run, base::Unretained(&callback8)));
  EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;
  // This write extends the stream to 20000 + 17000 = 37000 bytes.
  ret = entry->WriteData(
      1,
      20000,
      buffer3.get(),
      kSize1,
      base::Bind(&CallbackTest::Run, base::Unretained(&callback9)),
      false);
  EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_EQ(37000, entry->GetDataSize(1));

  // No callback object may have been invoked more than once.
  EXPECT_FALSE(helper.callback_reused_error());

  entry->Doom();
  entry->Close();
  FlushQueueForTest();
  EXPECT_EQ(0, cache()->GetEntryCount());
}
|
| +
|
| +// Tests that IOBuffers are not referenced after IO completes.
|
| +TEST_P(DiskCacheEntryTest, ReleaseBuffer) {
|
| + InitCache();
|
| +
|
| + Entry* entry = NULL;
|
| + ASSERT_EQ(net::OK, CreateEntry("the first key", &entry));
|
| + ASSERT_TRUE(NULL != entry);
|
| +
|
| + const int kBufferSize = 1024;
|
| + scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kBufferSize));
|
| + CacheTestFillBuffer(buffer->data(), kBufferSize, false);
|
| +
|
| + net::ReleaseBufferCompletionCallback cb(buffer.get());
|
| + int rv =
|
| + entry->WriteData(0, 0, buffer.get(), kBufferSize, cb.callback(), false);
|
| + EXPECT_EQ(kBufferSize, cb.GetResult(rv));
|
| + entry->Close();
|
| +}
|
| +
|
// Writes distinct random data to each of the three streams of one entry and
// verifies that the streams don't interfere with each other, then re-opens
// the entry and reads each stream back in two chunks.
TEST_P(DiskCacheEntryTest, StreamAccess) {
  InitCache();

  disk_cache::Entry* entry = NULL;
  ASSERT_EQ(net::OK, CreateEntry("the first key", &entry));
  ASSERT_TRUE(NULL != entry);

  const int kBufferSize = 1024;
  const int kNumStreams = 3;
  // One reference buffer of random data per stream.
  scoped_refptr<net::IOBuffer> reference_buffers[kNumStreams];
  for (int i = 0; i < kNumStreams; i++) {
    reference_buffers[i] = new net::IOBuffer(kBufferSize);
    CacheTestFillBuffer(reference_buffers[i]->data(), kBufferSize, false);
  }
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kBufferSize));
  // Write each stream and immediately read it back against the reference.
  for (int i = 0; i < kNumStreams; i++) {
    EXPECT_EQ(
        kBufferSize,
        WriteData(entry, i, 0, reference_buffers[i].get(), kBufferSize, false));
    memset(buffer1->data(), 0, kBufferSize);
    EXPECT_EQ(kBufferSize, ReadData(entry, i, 0, buffer1.get(), kBufferSize));
    EXPECT_EQ(
        0, memcmp(reference_buffers[i]->data(), buffer1->data(), kBufferSize));
  }
  // A stream index one past the last valid one must be rejected.
  EXPECT_EQ(net::ERR_INVALID_ARGUMENT,
            ReadData(entry, kNumStreams, 0, buffer1.get(), kBufferSize));
  entry->Close();

  // Open the entry and read it in chunks, including a read past the end.
  ASSERT_EQ(net::OK, OpenEntry("the first key", &entry));
  ASSERT_TRUE(NULL != entry);
  const int kReadBufferSize = 600;
  const int kFinalReadSize = kBufferSize - kReadBufferSize;
  COMPILE_ASSERT(kFinalReadSize < kReadBufferSize, should_be_exactly_two_reads);
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kReadBufferSize));
  for (int i = 0; i < kNumStreams; i++) {
    memset(buffer2->data(), 0, kReadBufferSize);
    EXPECT_EQ(kReadBufferSize,
              ReadData(entry, i, 0, buffer2.get(), kReadBufferSize));
    EXPECT_EQ(
        0,
        memcmp(reference_buffers[i]->data(), buffer2->data(), kReadBufferSize));

    // The second read asks for a full chunk but only kFinalReadSize bytes
    // remain before the end of the stream.
    memset(buffer2->data(), 0, kReadBufferSize);
    EXPECT_EQ(
        kFinalReadSize,
        ReadData(entry, i, kReadBufferSize, buffer2.get(), kReadBufferSize));
    EXPECT_EQ(0,
              memcmp(reference_buffers[i]->data() + kReadBufferSize,
                     buffer2->data(),
                     kFinalReadSize));
  }

  entry->Close();
}
|
| +
|
// Verifies that Entry::GetKey() returns the key the entry was created with,
// for keys of several lengths: short, 1000 bytes, 3000 bytes, 19999 bytes and
// exactly 16 KB.
TEST_P(DiskCacheEntryTest, GetKey) {
  InitCache();

  std::string key("the first key");
  Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_EQ(key, entry->GetKey()) << "short key";
  entry->Close();

  // Seed the C RNG so each run produces different random keys — presumably
  // CacheTestFillBuffer draws from rand(); confirm in disk_cache_test_util.
  int seed = static_cast<int>(Time::Now().ToInternalValue());
  srand(seed);
  char key_buffer[20000];

  // Fill 3000 bytes but terminate at 1000 for a 1000-byte key.
  CacheTestFillBuffer(key_buffer, 3000, true);
  key_buffer[1000] = '\0';

  key = key_buffer;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_TRUE(key == entry->GetKey()) << "1000 bytes key";
  entry->Close();

  // Restore byte 1000 and terminate at 3000 for a 3000-byte key.
  key_buffer[1000] = 'p';
  key_buffer[3000] = '\0';
  key = key_buffer;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_TRUE(key == entry->GetKey()) << "medium size key";
  entry->Close();

  // Use (almost) the whole buffer: a 19999-byte key.
  CacheTestFillBuffer(key_buffer, sizeof(key_buffer), true);
  key_buffer[19999] = '\0';

  key = key_buffer;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_TRUE(key == entry->GetKey()) << "long key";
  entry->Close();

  // A key of exactly 0x4000 (16384) bytes.
  CacheTestFillBuffer(key_buffer, 0x4000, true);
  key_buffer[0x4000] = '\0';

  key = key_buffer;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_TRUE(key == entry->GetKey()) << "16KB key";
  entry->Close();
}
|
| +
|
// Checks how GetLastUsed()/GetLastModified() react to writes and reads. The
// expected behavior depends on the backend under test, queried via traits().
TEST_P(DiskCacheEntryTest, GetTimes) {
  InitCache();

  std::string key("the first key");
  disk_cache::Entry* entry;

  Time t1 = Time::Now();
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  // A freshly created entry reports identical last-used/last-modified times.
  EXPECT_TRUE(entry->GetLastModified() == entry->GetLastUsed());

  AddDelay();
  Time t2 = Time::Now();
  EXPECT_TRUE(t2 > t1);
  EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
  // Backends whose writes don't bump the times still report a pre-t2 stamp;
  // the others must have updated past t2.
  if (!traits()->WritesUpdateLastUsed())
    EXPECT_TRUE(entry->GetLastModified() < t2);
  else
    EXPECT_TRUE(entry->GetLastModified() >= t2);
  // Either way, the write leaves both timestamps in agreement.
  EXPECT_TRUE(entry->GetLastModified() == entry->GetLastUsed());

  AddDelay();
  Time t3 = Time::Now();
  EXPECT_TRUE(t3 > t2);
  const int kSize = 200;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer.get(), kSize));
  if (!traits()->ReadsUpdateLastUsed()) {
    // Reads don't touch the timestamps; whatever the earlier write left
    // behind is still in effect.
    if (!traits()->WritesUpdateLastUsed()) {
      EXPECT_TRUE(entry->GetLastUsed() < t2);
      EXPECT_TRUE(entry->GetLastModified() < t2);
    } else {
      EXPECT_TRUE(entry->GetLastUsed() < t3);
      EXPECT_TRUE(entry->GetLastModified() < t3);
    }
  } else {
    // Reads bump last-used but never last-modified.
    EXPECT_TRUE(entry->GetLastUsed() >= t3);
    EXPECT_TRUE(entry->GetLastModified() < t3);
  }

  entry->Close();
}
|
| +
|
// Grows a stream in steps (10 -> 2000 -> 20000 bytes), first on a single open
// entry and then re-opening between steps, verifying contents and reported
// size as the data moves between internal and external storage.
TEST_P(DiskCacheEntryTest, GrowData) {
  InitCache();

  std::string key1("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key1, &entry));

  const int kSize = 20000;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer1->data(), kSize, false);
  memset(buffer2->data(), 0, kSize);

  // First pass: grow the same open entry step by step.
  base::strlcpy(buffer1->data(), "the data", kSize);
  EXPECT_EQ(10, WriteData(entry, 0, 0, buffer1.get(), 10, false));
  EXPECT_EQ(10, ReadData(entry, 0, 0, buffer2.get(), 10));
  EXPECT_STREQ("the data", buffer2->data());
  EXPECT_EQ(10, entry->GetDataSize(0));

  EXPECT_EQ(2000, WriteData(entry, 0, 0, buffer1.get(), 2000, false));
  EXPECT_EQ(2000, entry->GetDataSize(0));
  EXPECT_EQ(2000, ReadData(entry, 0, 0, buffer2.get(), 2000));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 2000));

  EXPECT_EQ(20000, WriteData(entry, 0, 0, buffer1.get(), kSize, false));
  EXPECT_EQ(20000, entry->GetDataSize(0));
  EXPECT_EQ(20000, ReadData(entry, 0, 0, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), kSize));
  entry->Close();

  // Second pass: same growth steps, but closing and re-opening the entry
  // between steps so each write starts from the on-disk state.
  memset(buffer2->data(), 0, kSize);
  std::string key2("Second key");
  ASSERT_EQ(net::OK, CreateEntry(key2, &entry));
  EXPECT_EQ(10, WriteData(entry, 0, 0, buffer1.get(), 10, false));
  EXPECT_EQ(10, entry->GetDataSize(0));
  entry->Close();

  // Go from an internal address to a bigger block size.
  ASSERT_EQ(net::OK, OpenEntry(key2, &entry));
  EXPECT_EQ(2000, WriteData(entry, 0, 0, buffer1.get(), 2000, false));
  EXPECT_EQ(2000, entry->GetDataSize(0));
  EXPECT_EQ(2000, ReadData(entry, 0, 0, buffer2.get(), 2000));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 2000));
  entry->Close();
  memset(buffer2->data(), 0, kSize);

  // Go from an internal address to an external one.
  ASSERT_EQ(net::OK, OpenEntry(key2, &entry));
  EXPECT_EQ(20000, WriteData(entry, 0, 0, buffer1.get(), kSize, false));
  EXPECT_EQ(20000, entry->GetDataSize(0));
  EXPECT_EQ(20000, ReadData(entry, 0, 0, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), kSize));
  entry->Close();

  // Double check the size from disk.
  ASSERT_EQ(net::OK, OpenEntry(key2, &entry));
  EXPECT_EQ(20000, entry->GetDataSize(0));

  // Now extend the entry without actual data.
  EXPECT_EQ(0, WriteData(entry, 0, 45500, buffer1.get(), 0, false));
  entry->Close();

  // And check again from disk.
  ASSERT_EQ(net::OK, OpenEntry(key2, &entry));
  EXPECT_EQ(45500, entry->GetDataSize(0));
  entry->Close();
}
|
| +
|
// Shrinks a stream with and without the truncate flag, both while the data
// lives in internal blocks and after it has moved to an external file.
TEST_P(DiskCacheEntryTest, TruncateData) {
  InitCache();

  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize1 = 20000;
  const int kSize2 = 20000;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));

  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  memset(buffer2->data(), 0, kSize2);

  // Simple truncation:
  EXPECT_EQ(200, WriteData(entry, 0, 0, buffer1.get(), 200, false));
  EXPECT_EQ(200, entry->GetDataSize(0));
  // A shorter write WITHOUT the truncate flag must not shrink the stream.
  EXPECT_EQ(100, WriteData(entry, 0, 0, buffer1.get(), 100, false));
  EXPECT_EQ(200, entry->GetDataSize(0));
  // With the truncate flag the stream shrinks to the end of the write.
  EXPECT_EQ(100, WriteData(entry, 0, 0, buffer1.get(), 100, true));
  EXPECT_EQ(100, entry->GetDataSize(0));
  // A zero-length truncating write cuts the stream at the given offset.
  EXPECT_EQ(0, WriteData(entry, 0, 50, buffer1.get(), 0, true));
  EXPECT_EQ(50, entry->GetDataSize(0));
  EXPECT_EQ(0, WriteData(entry, 0, 0, buffer1.get(), 0, true));
  EXPECT_EQ(0, entry->GetDataSize(0));
  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));

  // Go to an external file.
  EXPECT_EQ(20000, WriteData(entry, 0, 0, buffer1.get(), 20000, true));
  EXPECT_EQ(20000, entry->GetDataSize(0));
  EXPECT_EQ(20000, ReadData(entry, 0, 0, buffer2.get(), 20000));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 20000));
  memset(buffer2->data(), 0, kSize2);

  // External file truncation
  EXPECT_EQ(18000, WriteData(entry, 0, 0, buffer1.get(), 18000, false));
  EXPECT_EQ(20000, entry->GetDataSize(0));
  EXPECT_EQ(18000, WriteData(entry, 0, 0, buffer1.get(), 18000, true));
  EXPECT_EQ(18000, entry->GetDataSize(0));
  EXPECT_EQ(0, WriteData(entry, 0, 17500, buffer1.get(), 0, true));
  EXPECT_EQ(17500, entry->GetDataSize(0));

  // And back to an internal block.
  EXPECT_EQ(600, WriteData(entry, 0, 1000, buffer1.get(), 600, true));
  EXPECT_EQ(1600, entry->GetDataSize(0));
  EXPECT_EQ(600, ReadData(entry, 0, 1000, buffer2.get(), 600));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 600));
  // Data below the truncation point must survive the shrink.
  EXPECT_EQ(1000, ReadData(entry, 0, 0, buffer2.get(), 1000));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 1000))
      << "Preserves previous data";

  // Go from external file to zero length.
  EXPECT_EQ(20000, WriteData(entry, 0, 0, buffer1.get(), 20000, true));
  EXPECT_EQ(20000, entry->GetDataSize(0));
  EXPECT_EQ(0, WriteData(entry, 0, 0, buffer1.get(), 0, true));
  EXPECT_EQ(0, entry->GetDataSize(0));

  entry->Close();
}
|
| +
|
// Zero-byte reads and writes (with NULL buffers) must succeed; zero-byte
// writes past the end extend the stream, and the holes read back as zeros.
TEST_P(DiskCacheEntryTest, ZeroLengthIO) {
  InitCache();

  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  // Zero-length IO with a NULL buffer is legal and returns 0 bytes.
  EXPECT_EQ(0, ReadData(entry, 0, 0, NULL, 0));
  EXPECT_EQ(0, WriteData(entry, 0, 0, NULL, 0, false));

  // This write should extend the entry.
  EXPECT_EQ(0, WriteData(entry, 0, 1000, NULL, 0, false));
  EXPECT_EQ(0, ReadData(entry, 0, 500, NULL, 0));
  EXPECT_EQ(0, ReadData(entry, 0, 2000, NULL, 0));
  EXPECT_EQ(1000, entry->GetDataSize(0));

  // Extend much further, this time with the truncate flag set.
  EXPECT_EQ(0, WriteData(entry, 0, 100000, NULL, 0, true));
  EXPECT_EQ(0, ReadData(entry, 0, 50000, NULL, 0));
  EXPECT_EQ(100000, entry->GetDataSize(0));

  // Let's verify the actual content.
  const int kSize = 20;
  const char zeros[kSize] = {};
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));

  // Each sample is taken from a region that was never explicitly written, so
  // it must come back zero-filled. The buffer is re-randomized before every
  // read so a no-op read cannot pass by accident.
  CacheTestFillBuffer(buffer->data(), kSize, false);
  EXPECT_EQ(kSize, ReadData(entry, 0, 500, buffer.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer->data(), zeros, kSize));

  CacheTestFillBuffer(buffer->data(), kSize, false);
  EXPECT_EQ(kSize, ReadData(entry, 0, 5000, buffer.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer->data(), zeros, kSize));

  CacheTestFillBuffer(buffer->data(), kSize, false);
  EXPECT_EQ(kSize, ReadData(entry, 0, 50000, buffer.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer->data(), zeros, kSize));

  entry->Close();
}
|
| +
|
| +// Checks that entries are zero length when created.
|
| +TEST_P(DiskCacheEntryTest, SizeAtCreate) {
|
| + InitCache();
|
| +
|
| + const char key[] = "the first key";
|
| + disk_cache::Entry* entry;
|
| + ASSERT_EQ(net::OK, CreateEntry(key, &entry));
|
| +
|
| + const int kNumStreams = 3;
|
| + for (int i = 0; i < kNumStreams; ++i)
|
| + EXPECT_EQ(0, entry->GetDataSize(i));
|
| + entry->Close();
|
| +}
|
| +
|
// Some extra tests to make sure that buffering works properly when changing
// the entry size.
TEST_P(DiskCacheEntryTest, SizeChanges) {
  InitCache();

  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 200;
  const char zeros[kSize] = {};
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer1->data(), kSize, true);
  CacheTestFillBuffer(buffer2->data(), kSize, true);

  // Lay down three disjoint 200-byte chunks, leaving unwritten gaps between
  // them; the stream size becomes 23000 + kSize.
  EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer1.get(), kSize, true));
  EXPECT_EQ(kSize, WriteData(entry, 1, 17000, buffer1.get(), kSize, true));
  EXPECT_EQ(kSize, WriteData(entry, 1, 23000, buffer1.get(), kSize, true));
  entry->Close();

  // Extend the file and read between the old size and the new write.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(23000 + kSize, entry->GetDataSize(1));
  EXPECT_EQ(kSize, WriteData(entry, 1, 25000, buffer1.get(), kSize, true));
  EXPECT_EQ(25000 + kSize, entry->GetDataSize(1));
  EXPECT_EQ(kSize, ReadData(entry, 1, 24000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), zeros, kSize));

  // Read at the end of the old file size.
  EXPECT_EQ(kSize,
            ReadData(entry, 1, 23000 + kSize - 35, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data() + kSize - 35, 35));

  // Read slightly before the last write: the first 100 bytes fall in the gap
  // (zeros) and the rest is the start of the written data.
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 24900, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), zeros, 100));
  EXPECT_TRUE(!memcmp(buffer2->data() + 100, buffer1->data(), kSize - 100));

  // Extend the entry a little more.
  EXPECT_EQ(kSize, WriteData(entry, 1, 26000, buffer1.get(), kSize, true));
  EXPECT_EQ(26000 + kSize, entry->GetDataSize(1));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 25900, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), zeros, 100));
  EXPECT_TRUE(!memcmp(buffer2->data() + 100, buffer1->data(), kSize - 100));

  // And now reduce the size.
  EXPECT_EQ(kSize, WriteData(entry, 1, 25000, buffer1.get(), kSize, true));
  EXPECT_EQ(25000 + kSize, entry->GetDataSize(1));
  // Only the last 28 bytes of the stream remain past this offset.
  EXPECT_EQ(28, ReadData(entry, 1, 25000 + kSize - 28, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data() + kSize - 28, 28));

  // Reduce the size with a buffer that is not extending the size.
  EXPECT_EQ(kSize, WriteData(entry, 1, 24000, buffer1.get(), kSize, false));
  EXPECT_EQ(25000 + kSize, entry->GetDataSize(1));
  EXPECT_EQ(kSize, WriteData(entry, 1, 24500, buffer1.get(), kSize, true));
  EXPECT_EQ(24500 + kSize, entry->GetDataSize(1));
  EXPECT_EQ(kSize, ReadData(entry, 1, 23900, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), zeros, 100));
  EXPECT_TRUE(!memcmp(buffer2->data() + 100, buffer1->data(), kSize - 100));

  // And now reduce the size below the old size.
  EXPECT_EQ(kSize, WriteData(entry, 1, 19000, buffer1.get(), kSize, true));
  EXPECT_EQ(19000 + kSize, entry->GetDataSize(1));
  EXPECT_EQ(kSize, ReadData(entry, 1, 18900, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), zeros, 100));
  EXPECT_TRUE(!memcmp(buffer2->data() + 100, buffer1->data(), kSize - 100));

  // Verify that the actual file is truncated.
  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(19000 + kSize, entry->GetDataSize(1));

  // Extend the newly opened file with a zero length write, expect zero fill.
  EXPECT_EQ(0, WriteData(entry, 1, 20000 + kSize, buffer1.get(), 0, false));
  EXPECT_EQ(kSize, ReadData(entry, 1, 19000 + kSize, buffer1.get(), kSize));
  EXPECT_EQ(0, memcmp(buffer1->data(), zeros, kSize));

  entry->Close();
}
|
| +
|
| +// Write more than the total cache capacity but to a single entry. |max_size|
|
| +// is the maximum size of the cache.
|
| +void ReuseEntryTest(const int max_size,
|
| + DiskCacheTest* test) {
|
| + const int write_size = max_size / 10;
|
| +
|
| + std::string key1("the first key");
|
| + disk_cache::Entry* entry;
|
| + ASSERT_EQ(net::OK, test->CreateEntry(key1, &entry));
|
| +
|
| + entry->Close();
|
| + std::string key2("the second key");
|
| + ASSERT_EQ(net::OK, test->CreateEntry(key2, &entry));
|
| +
|
| + scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(write_size));
|
| + CacheTestFillBuffer(buffer->data(), write_size, false);
|
| +
|
| + for (int j = 0; j < 15; j++) {
|
| + EXPECT_EQ(0, test->WriteData(entry, 0, 0, buffer.get(), 0, true));
|
| + EXPECT_EQ(write_size, test->WriteData(entry, 0, 0, buffer.get(), write_size, false));
|
| + entry->Close();
|
| + ASSERT_EQ(net::OK, test->OpenEntry(key2, &entry));
|
| + }
|
| +
|
| + entry->Close();
|
| + ASSERT_EQ(net::OK, test->OpenEntry(key1, &entry)) << "have not evicted this entry";
|
| + entry->Close();
|
| +}
|
| +
|
// Repeatedly rewrites a single entry with more data than the cache can hold
// in total, using writes of max_size / 10 = 20 KB (large enough to exercise
// the external-data path suggested by the test name), and checks that another
// entry is not evicted. NOTE(review): the comment that previously stood here
// was a copy of the SizeChanges one and did not describe this test.
TEST_P(DiskCacheEntryTest, ReuseExternalEntry) {
  const int max_size = 200 * 1024;
  SetMaxSize(max_size);
  InitCache();
  ReuseEntryTest(max_size, this);
}
|
| +
|
// Same scenario as ReuseExternalEntry but with a 100 KB cache, so each write
// is max_size / 10 = 10 KB (the smaller-payload variant of the reuse test).
TEST_P(DiskCacheEntryTest, ReuseInternalEntry) {
  const int max_size = 100 * 1024;
  // SetMaxSize must run before InitCache so the limit takes effect.
  SetMaxSize(max_size);
  InitCache();
  ReuseEntryTest(max_size, this);
}
|
| +
|
// Reading somewhere that was not written should return zeros.
TEST_P(DiskCacheEntryTest, InvalidData) {
  InitCache();

  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize1 = 20000;
  const int kSize2 = 20000;
  const int kSize3 = 20000;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
  scoped_refptr<net::IOBuffer> buffer3(new net::IOBuffer(kSize3));

  // buffer1 carries random payload; buffer2 stays all zeros and is the
  // expected image of any unwritten (hole) region. buffer3 receives reads.
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  memset(buffer2->data(), 0, kSize2);

  // Simple data grow: writing 200 bytes at offset 400 leaves [0, 400) as a
  // hole; a read starting inside the hole must come back zero-filled.
  EXPECT_EQ(200, WriteData(entry, 0, 400, buffer1.get(), 200, false));
  EXPECT_EQ(600, entry->GetDataSize(0));
  EXPECT_EQ(100, ReadData(entry, 0, 300, buffer3.get(), 100));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 100));
  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));

  // The entry is now on disk. Load it and extend it.
  EXPECT_EQ(200, WriteData(entry, 0, 800, buffer1.get(), 200, false));
  EXPECT_EQ(1000, entry->GetDataSize(0));
  EXPECT_EQ(100, ReadData(entry, 0, 700, buffer3.get(), 100));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 100));
  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));

  // This time using truncate.
  EXPECT_EQ(200, WriteData(entry, 0, 1800, buffer1.get(), 200, true));
  EXPECT_EQ(2000, entry->GetDataSize(0));
  EXPECT_EQ(100, ReadData(entry, 0, 1500, buffer3.get(), 100));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 100));

  // Go to an external file.
  EXPECT_EQ(200, WriteData(entry, 0, 19800, buffer1.get(), 200, false));
  EXPECT_EQ(20000, entry->GetDataSize(0));
  EXPECT_EQ(4000, ReadData(entry, 0, 14000, buffer3.get(), 4000));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 4000));

  // And back to an internal block.
  EXPECT_EQ(600, WriteData(entry, 0, 1000, buffer1.get(), 600, true));
  EXPECT_EQ(1600, entry->GetDataSize(0));
  // This range was just written, so it must match buffer1, not zeros.
  EXPECT_EQ(600, ReadData(entry, 0, 1000, buffer3.get(), 600));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer1->data(), 600));

  // Extend it again.
  EXPECT_EQ(600, WriteData(entry, 0, 2000, buffer1.get(), 600, false));
  EXPECT_EQ(2600, entry->GetDataSize(0));
  EXPECT_EQ(200, ReadData(entry, 0, 1800, buffer3.get(), 200));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 200));

  // And again (with truncation flag).
  EXPECT_EQ(600, WriteData(entry, 0, 3000, buffer1.get(), 600, true));
  EXPECT_EQ(3600, entry->GetDataSize(0));
  EXPECT_EQ(200, ReadData(entry, 0, 2800, buffer3.get(), 200));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 200));

  entry->Close();
}
|
| +
|
| +// Tests that the cache preserves the buffer of an IO operation.
|
| +TEST_P(DiskCacheEntryTest, ReadWriteDestroyBuffer) {
|
| + InitCache();
|
| +
|
| + std::string key("the first key");
|
| + disk_cache::Entry* entry;
|
| + ASSERT_EQ(net::OK, CreateEntry(key, &entry));
|
| +
|
| + const int kSize = 200;
|
| + scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
|
| + CacheTestFillBuffer(buffer->data(), kSize, false);
|
| +
|
| + net::TestCompletionCallback cb;
|
| + int result = entry->WriteData(0, 0, buffer.get(), kSize,
|
| + cb.callback(), false);
|
| + // Release our reference to the buffer.
|
| + buffer = NULL;
|
| + EXPECT_EQ(kSize, cb.GetResult(result));
|
| +
|
| + // And now test with a Read().
|
| + buffer = new net::IOBuffer(kSize);
|
| + CacheTestFillBuffer(buffer->data(), kSize, false);
|
| +
|
| + result = entry->ReadData(0, 0, buffer.get(), kSize, cb.callback());
|
| + buffer = NULL;
|
| + EXPECT_EQ(kSize, cb.GetResult(result));
|
| +
|
| + entry->Close();
|
| +}
|
| +
|
// Dooming entries (freshly created, and populated with a very long key)
// must remove them from the cache.
TEST_P(DiskCacheEntryTest, DoomEntry) {
  InitCache();

  // Doom an entry immediately after creation.
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  entry->Doom();
  entry->Close();

  // Build a ~20 KB key from random bytes ('true' presumably avoids embedded
  // NULs — see CacheTestFillBuffer) terminated at the last byte.
  const int kSize = 20000;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer->data(), kSize, true);
  buffer->data()[19999] = '\0';

  key = buffer->data();
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_EQ(20000, WriteData(entry, 0, 0, buffer.get(), kSize, false));
  EXPECT_EQ(20000, WriteData(entry, 1, 0, buffer.get(), kSize, false));
  entry->Doom();
  entry->Close();

  // After all pending work runs, both doomed entries must be gone.
  FlushQueueForTest();
  EXPECT_EQ(0, cache()->GetEntryCount());
}
|
| +
|
| +// Tests dooming an entry that's linked to an open entry.
|
| +TEST_P(DiskCacheEntryTest, DoomNextToOpenEntry) {
|
| + InitCache();
|
| +
|
| + disk_cache::Entry* entry1;
|
| + disk_cache::Entry* entry2;
|
| + ASSERT_EQ(net::OK, CreateEntry("fixed", &entry1));
|
| + entry1->Close();
|
| + ASSERT_EQ(net::OK, CreateEntry("foo", &entry1));
|
| + entry1->Close();
|
| + ASSERT_EQ(net::OK, CreateEntry("bar", &entry1));
|
| + entry1->Close();
|
| +
|
| + ASSERT_EQ(net::OK, OpenEntry("foo", &entry1));
|
| + ASSERT_EQ(net::OK, OpenEntry("bar", &entry2));
|
| + entry2->Doom();
|
| + entry2->Close();
|
| +
|
| + ASSERT_EQ(net::OK, OpenEntry("foo", &entry2));
|
| + entry2->Doom();
|
| + entry2->Close();
|
| + entry1->Close();
|
| +
|
| + ASSERT_EQ(net::OK, OpenEntry("fixed", &entry1));
|
| + entry1->Close();
|
| +}
|
| +
|
// Verify that basic operations work as expected with doomed entries.
TEST_P(DiskCacheEntryTest, DoomedEntry) {
  InitCache();

  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  entry->Doom();

  // The doomed entry is no longer reachable through the cache...
  FlushQueueForTest();
  EXPECT_EQ(0, cache()->GetEntryCount());
  Time initial = Time::Now();
  AddDelay();

  const int kSize1 = 2000;
  const int kSize2 = 2000;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  memset(buffer2->data(), 0, kSize2);

  // ...but the still-open handle supports reads, writes and metadata, and
  // its timestamps keep being updated past |initial|.
  EXPECT_EQ(2000, WriteData(entry, 0, 0, buffer1.get(), 2000, false));
  EXPECT_EQ(2000, ReadData(entry, 0, 0, buffer2.get(), 2000));
  EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kSize1));
  EXPECT_EQ(key, entry->GetKey());
  EXPECT_TRUE(initial < entry->GetLastModified());
  EXPECT_TRUE(initial < entry->GetLastUsed());

  entry->Close();
}
|
| +
|
// Test that child entries in a cache backend are not visible from
// enumerations.
TEST_P(DiskCacheEntryTest, EnumerationWithSparseEntries) {
  // Skipped on backends that count sparse ranges as distinct entries.
  TEST_DISABLED_IF(traits()->EntryCountIncludesSparseRanges());

  InitCache();

  const int kSize = 4096;
  scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf->data(), kSize, false);

  std::string key("the first key");
  disk_cache::Entry* parent_entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &parent_entry));

  // Writes to the parent entry.
  EXPECT_EQ(kSize,
            WriteSparseData(parent_entry, 0, buf.get(), kSize));

  // This write creates a child entry and writes to it.
  EXPECT_EQ(kSize,
            WriteSparseData(parent_entry, 8192, buf.get(), kSize));

  parent_entry->Close();

  // Perform the enumerations; only the parent entry should be visible.
  void* iter = NULL;
  disk_cache::Entry* entry = NULL;
  int count = 0;
  while (OpenNextEntry(&iter, &entry) == net::OK) {
    ASSERT_TRUE(entry != NULL);
    ++count;
    entry->Close();
  }
  EXPECT_EQ(1, count);
}
|
| +
|
| +// Writes |buf_1| to offset and reads it back as |buf_2|.
|
| +void VerifySparseIO(disk_cache::Entry* entry, int64 offset,
|
| + net::IOBuffer* buf_1, int size, net::IOBuffer* buf_2) {
|
| + net::TestCompletionCallback cb;
|
| +
|
| + memset(buf_2->data(), 0, size);
|
| + int ret = entry->ReadSparseData(offset, buf_2, size, cb.callback());
|
| + EXPECT_EQ(0, cb.GetResult(ret));
|
| +
|
| + ret = entry->WriteSparseData(offset, buf_1, size, cb.callback());
|
| + EXPECT_EQ(size, cb.GetResult(ret));
|
| +
|
| + ret = entry->ReadSparseData(offset, buf_2, size, cb.callback());
|
| + EXPECT_EQ(size, cb.GetResult(ret));
|
| +
|
| + EXPECT_EQ(0, memcmp(buf_1->data(), buf_2->data(), size));
|
| +}
|
| +
|
| +// Reads |size| bytes from |entry| at |offset| and verifies that they are the
|
| +// same as the content of the provided |buffer|.
|
| +void VerifyContentSparseIO(disk_cache::Entry* entry, int64 offset, char* buffer,
|
| + int size) {
|
| + net::TestCompletionCallback cb;
|
| +
|
| + scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(size));
|
| + memset(buf_1->data(), 0, size);
|
| + int ret = entry->ReadSparseData(offset, buf_1.get(), size, cb.callback());
|
| + EXPECT_EQ(size, cb.GetResult(ret));
|
| + EXPECT_EQ(0, memcmp(buf_1->data(), buffer, size));
|
| +}
|
| +
|
// Sparse round trips at offsets spanning 0 to 32 GB, verified both through
// the open handle and again after reopening the entry.
TEST_P(DiskCacheEntryTest, BasicSparseIO) {
  InitCache();

  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 2048;
  scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buf_2(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf_1->data(), kSize, false);

  // Write at offset 0.
  VerifySparseIO(entry, 0, buf_1.get(), kSize, buf_2.get());

  // Write at offset 0x400000 (4 MB).
  VerifySparseIO(entry, 0x400000, buf_1.get(), kSize, buf_2.get());

  // Write at offset 0x800000000 (32 GB).
  VerifySparseIO(entry, 0x800000000LL, buf_1.get(), kSize, buf_2.get());

  entry->Close();

  // Check everything again after a close/reopen cycle.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  VerifyContentSparseIO(entry, 0, buf_1->data(), kSize);
  VerifyContentSparseIO(entry, 0x400000, buf_1->data(), kSize);
  VerifyContentSparseIO(entry, 0x800000000LL, buf_1->data(), kSize);
  entry->Close();
}
|
| +
|
// A single large sparse write (1.2 MB) that spans multiple child entries.
TEST_P(DiskCacheEntryTest, HugeSparseIO) {
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  // Write 1.2 MB so that we cover multiple entries.
  const int kSize = 1200 * 1024;
  scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buf_2(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf_1->data(), kSize, false);

  // Write at offset 0x20F0000 (33 MB - 64 KB).
  VerifySparseIO(entry, 0x20F0000, buf_1.get(), kSize, buf_2.get());
  entry->Close();

  // Check it again after a close/reopen cycle.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  VerifyContentSparseIO(entry, 0x20F0000, buf_1->data(), kSize);
  entry->Close();
}
|
| +
|
// GetAvailableRange must report contiguous filled ranges, stop at holes, and
// honor the requested length.
TEST_P(DiskCacheEntryTest, GetAvailableRange) {
  InitCache();

  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 16 * 1024;
  scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf->data(), kSize, false);

  // Write at offset 0x20F0000 (33 MB - 64 KB), and 0x20F4400 (33 MB - 47 KB).
  // The two 16 KB ranges leave a 1 KB hole at [0x20F4000, 0x20F4400).
  EXPECT_EQ(kSize, WriteSparseData(entry, 0x20F0000, buf.get(), kSize));
  EXPECT_EQ(kSize, WriteSparseData(entry, 0x20F4400, buf.get(), kSize));

  // We stop at the first empty block.
  int64 start;
  net::TestCompletionCallback cb;
  int rv = entry->GetAvailableRange(
      0x20F0000, kSize * 2, &start, cb.callback());
  EXPECT_EQ(kSize, cb.GetResult(rv));
  EXPECT_EQ(0x20F0000, start);

  // Queries that end before the data find nothing...
  start = 0;
  rv = entry->GetAvailableRange(0, kSize, &start, cb.callback());
  EXPECT_EQ(0, cb.GetResult(rv));
  rv = entry->GetAvailableRange(
      0x20F0000 - kSize, kSize, &start, cb.callback());
  EXPECT_EQ(0, cb.GetResult(rv));
  // ...while a query spanning the data reports where it begins.
  rv = entry->GetAvailableRange(0, 0x2100000, &start, cb.callback());
  EXPECT_EQ(kSize, cb.GetResult(rv));
  EXPECT_EQ(0x20F0000, start);

  // We should be able to Read based on the results of GetAvailableRange.
  start = -1;
  rv = entry->GetAvailableRange(0x2100000, kSize, &start, cb.callback());
  EXPECT_EQ(0, cb.GetResult(rv));
  rv = entry->ReadSparseData(start, buf.get(), kSize, cb.callback());
  EXPECT_EQ(0, cb.GetResult(rv));

  // Starting inside the first filled range: the remaining 0x2000 bytes of
  // that range are available and readable.
  start = 0;
  rv = entry->GetAvailableRange(0x20F2000, kSize, &start, cb.callback());
  EXPECT_EQ(0x2000, cb.GetResult(rv));
  EXPECT_EQ(0x20F2000, start);
  EXPECT_EQ(0x2000, ReadSparseData(entry, start, buf.get(), kSize));

  // Make sure that we respect the |len| argument: this query overlaps the
  // data by exactly one byte.
  start = 0;
  rv = entry->GetAvailableRange(
      0x20F0001 - kSize, kSize, &start, cb.callback());
  EXPECT_EQ(1, cb.GetResult(rv));
  EXPECT_EQ(0x20F0000, start);

  entry->Close();
}
|
| +
|
// CouldBeSparse() must be true (and persist across reopen) only for entries
// that received sparse writes.
TEST_P(DiskCacheEntryTest, CouldBeSparse) {
  TEST_DISABLED_IF(!traits()->ImplementsCouldBeSparse());

  InitCache();

  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 16 * 1024;
  scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf->data(), kSize, false);

  // Write at offset 0x20F0000 (33 MB - 64 KB).
  EXPECT_EQ(kSize, WriteSparseData(entry, 0x20F0000, buf.get(), kSize));

  // The sparse write marks the entry, and the flag survives reopening.
  EXPECT_TRUE(entry->CouldBeSparse());
  entry->Close();

  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_TRUE(entry->CouldBeSparse());
  entry->Close();

  // Now verify a regular entry.
  key.assign("another key");
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_FALSE(entry->CouldBeSparse());

  // Plain writes to all three streams must not set the flag.
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buf.get(), kSize, false));
  EXPECT_EQ(kSize, WriteData(entry, 1, 0, buf.get(), kSize, false));
  EXPECT_EQ(kSize, WriteData(entry, 2, 0, buf.get(), kSize, false));

  EXPECT_FALSE(entry->CouldBeSparse());
  entry->Close();

  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_FALSE(entry->CouldBeSparse());
  entry->Close();
}
|
| +
|
// GetAvailableRange with byte-granular writes; only runs on backends that
// track sparse data without rounding offsets.
TEST_P(DiskCacheEntryTest, UnalignedGetAvailableRange) {
  TEST_DISABLED_IF(traits()->SparseRoundingInterval() != 1);
  InitCache();

  const int kSize = 8192;
  scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf->data(), kSize, false);

  disk_cache::Entry* entry;
  std::string key("the first key");
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  // Writes in the middle of an entry: [0,1024), [5120,6144), [10000,11024).
  EXPECT_EQ(1024, WriteSparseData(entry, 0, buf.get(), 1024));
  EXPECT_EQ(1024, WriteSparseData(entry, 5120, buf.get(), 1024));
  EXPECT_EQ(1024, WriteSparseData(entry, 10000, buf.get(), 1024));

  // Writes in the middle of an entry and spans 2 child entries.
  EXPECT_EQ(8192, WriteSparseData(entry, 50000, buf.get(), 8192));

  int64 start;
  net::TestCompletionCallback cb;
  // Test that we stop at a discontinuous child at the second block.
  int rv = entry->GetAvailableRange(0, 10000, &start, cb.callback());
  EXPECT_EQ(1024, cb.GetResult(rv));
  EXPECT_EQ(0, start);

  // Test that number of bytes is reported correctly when we start from the
  // middle of a filled region.
  rv = entry->GetAvailableRange(512, 10000, &start, cb.callback());
  EXPECT_EQ(512, cb.GetResult(rv));
  EXPECT_EQ(512, start);

  // Test that we found bytes in the child of next block.
  rv = entry->GetAvailableRange(1024, 10000, &start, cb.callback());
  EXPECT_EQ(1024, cb.GetResult(rv));
  EXPECT_EQ(5120, start);

  // Test that the desired length is respected. It starts within a filled
  // region.
  rv = entry->GetAvailableRange(5500, 512, &start, cb.callback());
  EXPECT_EQ(512, cb.GetResult(rv));
  EXPECT_EQ(5500, start);

  // Test that the desired length is respected. It starts before a filled
  // region.
  rv = entry->GetAvailableRange(5000, 620, &start, cb.callback());
  EXPECT_EQ(500, cb.GetResult(rv));
  EXPECT_EQ(5120, start);

  // Test that multiple blocks are scanned.
  rv = entry->GetAvailableRange(40000, 20000, &start, cb.callback());
  EXPECT_EQ(8192, cb.GetResult(rv));
  EXPECT_EQ(50000, start);

  entry->Close();
}
|
| +
|
// Sparse IO at byte-granular offsets; only meaningful on backends that do not
// round sparse offsets.
TEST_P(DiskCacheEntryTest, UnalignedSparseIO) {
  TEST_DISABLED_IF(traits()->SparseRoundingInterval() != 1);
  InitCache();

  const int kSize = 8192;
  scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buf_2(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf_1->data(), kSize, false);

  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  // This loop writes back to back starting from offset 0 and 9000.
  for (int i = 0; i < kSize; i += 1024) {
    // buf_3 is a non-owning view into the current 1 KB slice of buf_1.
    scoped_refptr<net::WrappedIOBuffer> buf_3(
        new net::WrappedIOBuffer(buf_1->data() + i));
    VerifySparseIO(entry, i, buf_3.get(), 1024, buf_2.get());
    VerifySparseIO(entry, 9000 + i, buf_3.get(), 1024, buf_2.get());
  }

  // Make sure we have data written.
  VerifyContentSparseIO(entry, 0, buf_1->data(), kSize);
  VerifyContentSparseIO(entry, 9000, buf_1->data(), kSize);

  // This tests a large write that spans 3 entries from a misaligned offset.
  VerifySparseIO(entry, 20481, buf_1.get(), 8192, buf_2.get());

  entry->Close();
}
|
| +
|
// Updating a sparse entry through a freshly opened handle must extend the
// same entry rather than creating new ones.
TEST_P(DiskCacheEntryTest, UpdateSparseEntry) {
  InitCache();

  std::string key("the first key");
  disk_cache::Entry* entry1;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry1));

  const int kSize = 2048;
  scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buf_2(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf_1->data(), kSize, false);

  // Write at offset 0.
  VerifySparseIO(entry1, 0, buf_1.get(), kSize, buf_2.get());
  entry1->Close();

  // Write at offset 2048, through a reopened handle.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry1));
  VerifySparseIO(entry1, 2048, buf_1.get(), kSize, buf_2.get());

  disk_cache::Entry* entry2;
  ASSERT_EQ(net::OK, CreateEntry("the second key", &entry2));

  entry1->Close();
  entry2->Close();
  FlushQueueForTest();
  // On backends that count sparse ranges the sparse entry presumably accounts
  // for one extra child; otherwise only the two top-level entries exist.
  if (traits()->EntryCountIncludesSparseRanges())
    EXPECT_EQ(3, cache()->GetEntryCount());
  else
    EXPECT_EQ(2, cache()->GetEntryCount());
}
|
| +
|
// Dooming sparse entries (one while open, one after being fully saved) must
// eventually remove the parents and all their children.
TEST_P(DiskCacheEntryTest, DoomSparseEntry) {
  UseCurrentThread();
  InitCache();

  std::string key1("the first key");
  std::string key2("the second key");
  disk_cache::Entry *entry1, *entry2;
  ASSERT_EQ(net::OK, CreateEntry(key1, &entry1));
  ASSERT_EQ(net::OK, CreateEntry(key2, &entry2));

  const int kSize = 4 * 1024;
  scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf->data(), kSize, false);

  int64 offset = 1024;
  // Write to a bunch of ranges, quadrupling the offset each time so the data
  // spreads out widely.
  for (int i = 0; i < 12; i++) {
    EXPECT_EQ(kSize, WriteSparseData(entry1, offset, buf.get(), kSize));
    // Keep the second map under the default size.
    if (i < 9)
      EXPECT_EQ(kSize, WriteSparseData(entry2, offset, buf.get(), kSize));

    offset *= 4;
  }

  if (traits()->EntryCountIncludesSparseRanges())
    EXPECT_EQ(15, cache()->GetEntryCount());
  else
    EXPECT_EQ(2, cache()->GetEntryCount());

  // Doom the first entry while it's still open.
  entry1->Doom();
  entry1->Close();
  entry2->Close();

  // Doom the second entry after it's fully saved.
  EXPECT_EQ(net::OK, DoomEntry(key2));

  // Make sure we do all needed work. This may fail for entry2 if between Close
  // and DoomEntry the system decides to remove all traces of the file from the
  // system cache so we don't see that there is pending IO.
  base::MessageLoop::current()->RunUntilIdle();

  if (5 == cache()->GetEntryCount()) {
    // Most likely we are waiting for the result of reading the sparse info
    // (it's always async on Posix so it is easy to miss). Unfortunately we
    // don't have any signal to watch for so we can only wait.
    base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(500));
    base::MessageLoop::current()->RunUntilIdle();
  }
  EXPECT_EQ(0, cache()->GetEntryCount());
}
|
| +
|
| +// A CompletionCallback to let test code run in the callback. The way a
|
| +// CompletionCallback works means that all tasks (even new ones) are executed by
|
| +// the message loop before returning to the caller so the only way to simulate a
|
| +// race is to execute what we want on the callback.
|
| +class ClosureTestCompletionCallback: public net::TestCompletionCallback {
|
| + public:
|
| + explicit ClosureTestCompletionCallback(
|
| + const base::Closure& closure) : closure_(closure) {}
|
| +
|
| + private:
|
| + virtual void SetResult(int result) OVERRIDE {
|
| + closure_.Run();
|
| + TestCompletionCallback::SetResult(result);
|
| + }
|
| +
|
| + base::Closure closure_;
|
| + DISALLOW_COPY_AND_ASSIGN(ClosureTestCompletionCallback);
|
| +};
|
| +
|
// Tests that we don't crash when the backend is deleted while we are working
// deleting the sub-entries of a sparse entry.
TEST_P(DiskCacheEntryTest, DoomSparseEntry2) {
  TEST_DISABLED_IF(!traits()->DoomedSparseEntriesIOWorks());

  UseCurrentThread();
  InitCache();

  std::string key("the key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 4 * 1024;
  scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf->data(), kSize, false);

  int64 offset = 1024;
  // Write to a bunch of ranges. A null callback is used here; the EXPECT on
  // the return value requires the writes to complete synchronously (the cache
  // runs on the current thread, per UseCurrentThread() above).
  for (int i = 0; i < 12; i++) {
    EXPECT_EQ(kSize,
              entry->WriteSparseData(
                  offset, buf.get(), kSize, net::CompletionCallback()));
    offset *= 4;
  }
  if (traits()->EntryCountIncludesSparseRanges())
    EXPECT_EQ(9, cache()->GetEntryCount());
  else
    EXPECT_EQ(1, cache()->GetEntryCount());

  entry->Close();
  // Delete the backend from inside the DoomEntry completion callback, while
  // cleanup of the sparse children may still be in flight.
  ClosureTestCompletionCallback cb(
      base::Bind(&DiskCacheEntryTest_DoomSparseEntry2_Test::DeleteCache,
                 base::Unretained(this)));
  int rv = cache()->DoomEntry(key, cb.callback());
  EXPECT_EQ(net::OK, cb.GetResult(rv));
}
|
| +
|
// Partial (unaligned) sparse writes and the resulting available ranges.
TEST_P(DiskCacheEntryTest, PartialSparseEntry) {
  InitCache();

  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  // We should be able to deal with IO that is not aligned to the block size
  // of a sparse entry, at least to write a big range without leaving holes.
  const int kSize = 4 * 1024;
  const int kSmallSize = 128;
  scoped_refptr<net::IOBuffer> buf1(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf1->data(), kSize, false);

  // The first write is just to extend the entry. The third write occupies
  // a 1KB block partially, it may not be written internally depending on the
  // implementation. After these, data covers [20000, 24096), [500, 4596)
  // and [1080321, 1080449).
  EXPECT_EQ(kSize, WriteSparseData(entry, 20000, buf1.get(), kSize));
  EXPECT_EQ(kSize, WriteSparseData(entry, 500, buf1.get(), kSize));
  EXPECT_EQ(kSmallSize,
            WriteSparseData(entry, 1080321, buf1.get(), kSmallSize));
  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));

  scoped_refptr<net::IOBuffer> buf2(new net::IOBuffer(kSize));
  memset(buf2->data(), 0, kSize);
  // [8000, 8000 + kSize) was never written; nothing should come back.
  EXPECT_EQ(0, ReadSparseData(entry, 8000, buf2.get(), kSize));

  // Reading at kSize (4096) reaches the tail of the write at 500: only the
  // last 500 bytes of buf1 are available there.
  EXPECT_EQ(500, ReadSparseData(entry, kSize, buf2.get(), kSize));
  EXPECT_EQ(0, memcmp(buf2->data(), buf1->data() + kSize - 500, 500));
  // A read starting at 0 finds no data at its start (the hole [0, 500)).
  EXPECT_EQ(0, ReadSparseData(entry, 0, buf2.get(), kSize));

  // This read should not change anything.
  EXPECT_EQ(96, ReadSparseData(entry, 24000, buf2.get(), kSize));
  EXPECT_EQ(500, ReadSparseData(entry, kSize, buf2.get(), kSize));
  EXPECT_EQ(0, ReadSparseData(entry, 99, buf2.get(), kSize));

  int rv;
  int64 start;
  net::TestCompletionCallback cb;
  // Backends that round sparse offsets to 1 KB report the available ranges
  // differently from byte-granular backends.
  if (traits()->SparseRoundingInterval() == 1024) {
    rv = entry->GetAvailableRange(0, 2048, &start, cb.callback());
    EXPECT_EQ(1024, cb.GetResult(rv));
    EXPECT_EQ(1024, start);
  } else {
    rv = entry->GetAvailableRange(0, 600, &start, cb.callback());
    EXPECT_EQ(100, cb.GetResult(rv));
    EXPECT_EQ(500, start);
  }

  rv = entry->GetAvailableRange(kSize, kSize, &start, cb.callback());
  EXPECT_EQ(500, cb.GetResult(rv));
  EXPECT_EQ(kSize, start);
  rv = entry->GetAvailableRange(20 * 1024, 10000, &start, cb.callback());
  EXPECT_EQ(3616, cb.GetResult(rv));
  EXPECT_EQ(20 * 1024, start);

  // 1. Query before a filled 1KB block.
  // 2. Query within a filled 1KB block.
  // 3. Query beyond a filled 1KB block.
  if (traits()->SparseRoundingInterval() == 1024) {
    rv = entry->GetAvailableRange(19400, kSize, &start, cb.callback());
    EXPECT_EQ(3016, cb.GetResult(rv));
    EXPECT_EQ(20480, start);
  } else {
    rv = entry->GetAvailableRange(19400, kSize, &start, cb.callback());
    EXPECT_EQ(3496, cb.GetResult(rv));
    EXPECT_EQ(20000, start);
  }

  rv = entry->GetAvailableRange(3073, kSize, &start, cb.callback());
  EXPECT_EQ(1523, cb.GetResult(rv));
  EXPECT_EQ(3073, start);
  rv = entry->GetAvailableRange(4600, kSize, &start, cb.callback());
  EXPECT_EQ(0, cb.GetResult(rv));
  EXPECT_EQ(4600, start);

  // Now make another write and verify that there is no hole in between.
  EXPECT_EQ(kSize, WriteSparseData(entry, 500 + kSize, buf1.get(), kSize));
  rv = entry->GetAvailableRange(1024, 10000, &start, cb.callback());
  EXPECT_EQ(7 * 1024 + 500, cb.GetResult(rv));
  EXPECT_EQ(1024, start);
  EXPECT_EQ(kSize, ReadSparseData(entry, kSize, buf2.get(), kSize));
  EXPECT_EQ(0, memcmp(buf2->data(), buf1->data() + kSize - 500, 500));
  EXPECT_EQ(0, memcmp(buf2->data() + 500, buf1->data(), kSize - 500));

  entry->Close();
}
|
| +
|
| +
|
| +} // namespace
|
| +
|
| +} // namespace disk_cache
|
|
|