OLD | NEW |
(Empty) | |
| 1 // Copyright (c) 2014 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. |
| 4 |
#include <set>
#include <string>

#include "base/file_util.h"
#include "base/memory/scoped_ptr.h"
#include "base/platform_file.h"
#include "base/single_thread_task_runner.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "net/base/io_buffer.h"
#include "net/base/test_completion_callback.h"
#include "net/disk_cache/backend_tests.h"
#include "net/disk_cache/cache_util.h"
#include "net/disk_cache/disk_cache_test.h"
#include "net/disk_cache/disk_cache_test_util.h"
#include "net/disk_cache/entry_tests.h"
#include "net/disk_cache/simple/simple_backend_impl.h"
#include "net/disk_cache/simple/simple_entry_format.h"
#include "net/disk_cache/simple/simple_entry_impl.h"
#include "net/disk_cache/simple/simple_index.h"
#include "net/disk_cache/simple/simple_synchronous_entry.h"
#include "net/disk_cache/simple/simple_test_util.h"
#include "net/disk_cache/simple/simple_util.h"
| 28 |
| 29 namespace disk_cache { |
| 30 |
| 31 namespace { |
| 32 |
| 33 class SimpleCacheCreateBackendExtraData |
| 34 : public BackendTestTraits::CreateBackendExtraData { |
| 35 public: |
| 36 SimpleCacheCreateBackendExtraData() : do_not_wait_for_index_(false) {} |
| 37 |
| 38 bool do_not_wait_for_index() const { return do_not_wait_for_index_; } |
| 39 void set_do_not_wait_for_index(bool do_not_wait_for_index) { |
| 40 do_not_wait_for_index_ = do_not_wait_for_index; |
| 41 } |
| 42 |
| 43 private: |
| 44 bool do_not_wait_for_index_; |
| 45 }; |
| 46 |
| 47 class SimpleCacheBackendTraits : public BackendTestTraits { |
| 48 public: |
| 49 virtual ~SimpleCacheBackendTraits() {} |
| 50 |
| 51 virtual Backend* CreateBackend( |
| 52 const CreateBackendExtraData* extra_data, |
| 53 const base::FilePath& cache_path, |
| 54 int max_size, |
| 55 base::MessageLoopProxy* task_runner) const OVERRIDE { |
| 56 const SimpleCacheCreateBackendExtraData* simple_cache_extra_data = |
| 57 static_cast<const SimpleCacheCreateBackendExtraData*>(extra_data); |
| 58 |
| 59 net::TestCompletionCallback cb; |
| 60 scoped_ptr<SimpleBackendImpl> simple_backend( |
| 61 new SimpleBackendImpl( |
| 62 cache_path, max_size, type_, |
| 63 make_scoped_refptr(task_runner).get(), NULL)); |
| 64 int rv = simple_backend->Init(cb.callback()); |
| 65 if (cb.GetResult(rv) != net::OK) |
| 66 return NULL; |
| 67 if (simple_cache_extra_data && |
| 68 !simple_cache_extra_data->do_not_wait_for_index()) { |
| 69 net::TestCompletionCallback wait_for_index_cb; |
| 70 rv = simple_backend->index()->ExecuteWhenReady( |
| 71 wait_for_index_cb.callback()); |
| 72 if (wait_for_index_cb.GetResult(rv) != net::OK) |
| 73 return NULL; |
| 74 } |
| 75 return simple_backend.release(); |
| 76 } |
| 77 |
| 78 virtual bool UsesCacheThread() const OVERRIDE { return false; } |
| 79 virtual bool ImplementsCouldBeSparse() const OVERRIDE { return false; } |
| 80 virtual bool DoomedSparseEntriesIOWorks() const OVERRIDE { return false; } |
| 81 |
| 82 virtual void AddDelay() const OVERRIDE { |
| 83 // The simple cache uses second resolution for many timeouts, so it's safest |
| 84 // to advance by at least whole seconds before falling back into the normal |
| 85 // disk cache epsilon advance. |
| 86 const base::Time initial_time = base::Time::Now(); |
| 87 do { |
| 88 base::PlatformThread::YieldCurrentThread(); |
| 89 } while (base::Time::Now() - |
| 90 initial_time < base::TimeDelta::FromSeconds(1)); |
| 91 |
| 92 BackendTestTraits::AddDelay(); |
| 93 } |
| 94 |
| 95 virtual bool SetMaxSize(Backend* backend, int size) const OVERRIDE { |
| 96 return static_cast<SimpleBackendImpl*>(backend)->SetMaxSize(size); |
| 97 } |
| 98 |
| 99 net::CacheType cache_type() const { return type_; } |
| 100 |
| 101 static const BackendTestTraits* DiskCache() { |
| 102 static SimpleCacheBackendTraits traits(net::DISK_CACHE); |
| 103 return &traits; |
| 104 } |
| 105 |
| 106 static const BackendTestTraits* AppCache() { |
| 107 static SimpleCacheBackendTraits traits(net::DISK_CACHE); |
| 108 return &traits; |
| 109 } |
| 110 |
| 111 private: |
| 112 explicit SimpleCacheBackendTraits(net::CacheType type) : type_(type) {} |
| 113 |
| 114 const net::CacheType type_; |
| 115 }; |
| 116 |
// Run the shared entry and backend suites against the simple cache in both
// the DISK_CACHE and APP_CACHE configurations.
INSTANTIATE_TEST_CASE_P(
    SimpleCache, DiskCacheEntryTest,
    ::testing::Values(SimpleCacheBackendTraits::DiskCache(),
                      SimpleCacheBackendTraits::AppCache()));

INSTANTIATE_TEST_CASE_P(
    SimpleCache, DiskCacheBackendTest,
    ::testing::Values(SimpleCacheBackendTraits::DiskCache(),
                      SimpleCacheBackendTraits::AppCache()));
| 126 |
| 127 class DiskCacheSimpleEntryTest : public DiskCacheTest { |
| 128 protected: |
| 129 net::CacheType cache_type() const { return simple_traits()->cache_type(); } |
| 130 |
| 131 private: |
| 132 const SimpleCacheBackendTraits* simple_traits() const { |
| 133 return |
| 134 static_cast<const SimpleCacheBackendTraits*>(DiskCacheTest::traits()); |
| 135 } |
| 136 }; |
| 137 |
INSTANTIATE_TEST_CASE_P(
    SimpleCache, DiskCacheSimpleEntryTest,
    ::testing::Values(SimpleCacheBackendTraits::DiskCache(),
                      SimpleCacheBackendTraits::AppCache()));

// Fixture for simple-cache-specific backend tests.
class DiskCacheSimpleBackendTest : public DiskCacheTest {};

INSTANTIATE_TEST_CASE_P(
    SimpleCache, DiskCacheSimpleBackendTest,
    ::testing::Values(SimpleCacheBackendTraits::DiskCache(),
                      SimpleCacheBackendTraits::AppCache()));
| 149 |
| 150 // Individual unit tests and helper functions after here. |
| 151 |
// Creates an entry under |key| in |test|'s cache, writes a short payload to
// stream 1, then flips one byte of the backing file on disk so a later read
// fails its checksum. On success stores the payload size (which includes the
// trailing NUL, see below) in |out_data_size| and returns true.
bool MakeBadChecksumEntry(const std::string& key,
                          DiskCacheTest* test,
                          int* out_data_size) {
  disk_cache::Entry* entry = NULL;

  if (test->CreateEntry(key, &entry) != net::OK || !entry) {
    LOG(ERROR) << "Could not create entry";
    return false;
  }

  const char data[] = "this is very good data";
  // arraysize() counts the terminating NUL, so it is written along with the
  // text.
  const int kDataSize = arraysize(data);
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kDataSize));
  base::strlcpy(buffer->data(), data, kDataSize);

  EXPECT_EQ(kDataSize, test->WriteData(entry, 1, 0, buffer.get(), kDataSize, false));
  // Close so the data reaches the backing file before we corrupt it.
  entry->Close();
  entry = NULL;

  // Corrupt the last byte of the data.
  base::FilePath entry_file0_path = test->cache_path().AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
  int flags = base::PLATFORM_FILE_WRITE | base::PLATFORM_FILE_OPEN;
  base::PlatformFile entry_file0 =
      base::CreatePlatformFile(entry_file0_path, flags, NULL, NULL);
  if (entry_file0 == base::kInvalidPlatformFileValue)
    return false;

  // Offset of the last text byte: file header, then the key, then the
  // payload ("- 2" rather than "- 1" because kDataSize counts the NUL).
  int64 file_offset =
      sizeof(disk_cache::SimpleFileHeader) + key.size() + kDataSize - 2;
  EXPECT_EQ(1, base::WritePlatformFile(entry_file0, file_offset, "X", 1));
  if (!base::ClosePlatformFile(entry_file0))
    return false;
  *out_data_size = kDataSize;
  return true;
}
| 188 |
// Tests that the simple cache can detect entries that have bad data.
TEST_P(DiskCacheSimpleEntryTest, BadChecksum) {
  InitCache();

  const char key[] = "the first key";
  int size_unused;
  // Write an entry whose on-disk payload is corrupted after the fact.
  ASSERT_TRUE(MakeBadChecksumEntry(key, this, &size_unused));

  disk_cache::Entry* entry = NULL;

  // Open the entry.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  ScopedEntryPtr entry_closer(entry);

  const int kReadBufferSize = 200;
  EXPECT_GE(kReadBufferSize, entry->GetDataSize(1));
  scoped_refptr<net::IOBuffer> read_buffer(new net::IOBuffer(kReadBufferSize));
  // The read must surface the corruption as a checksum mismatch.
  EXPECT_EQ(net::ERR_CACHE_CHECKSUM_MISMATCH,
            ReadData(entry, 1, 0, read_buffer.get(), kReadBufferSize));
}
| 209 |
// Tests that an entry that has had an IO error occur can still be Doomed().
TEST_P(DiskCacheSimpleEntryTest, ErrorThenDoom) {
  InitCache();

  const char key[] = "the first key";
  int size_unused;
  ASSERT_TRUE(MakeBadChecksumEntry(key, this, &size_unused));

  disk_cache::Entry* entry = NULL;

  // Open the entry, forcing an IO error.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  ScopedEntryPtr entry_closer(entry);

  const int kReadBufferSize = 200;
  EXPECT_GE(kReadBufferSize, entry->GetDataSize(1));
  scoped_refptr<net::IOBuffer> read_buffer(new net::IOBuffer(kReadBufferSize));
  // Trip the checksum failure so the entry is in an errored state.
  EXPECT_EQ(net::ERR_CACHE_CHECKSUM_MISMATCH,
            ReadData(entry, 1, 0, read_buffer.get(), kReadBufferSize));

  entry->Doom();  // Should not crash.
}
| 232 |
| 233 bool TruncatePath(const base::FilePath& file_path, int64 length) { |
| 234 const int flags = base::PLATFORM_FILE_WRITE | base::PLATFORM_FILE_OPEN; |
| 235 base::PlatformFile file = |
| 236 base::CreatePlatformFile(file_path, flags, NULL, NULL); |
| 237 if (base::kInvalidPlatformFileValue == file) |
| 238 return false; |
| 239 const bool result = base::TruncatePlatformFile(file, length); |
| 240 base::ClosePlatformFile(file); |
| 241 return result; |
| 242 } |
| 243 |
// An entry file too short to contain its EOF record must fail to open.
TEST_P(DiskCacheSimpleEntryTest, NoEOF) {
  InitCache();

  const char key[] = "the first key";

  disk_cache::Entry* entry = NULL;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  disk_cache::Entry* null = NULL;
  EXPECT_NE(null, entry);
  entry->Close();
  entry = NULL;

  // Force the entry to flush to disk, so subsequent platform file operations
  // succeed.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  entry->Close();
  entry = NULL;

  // Truncate the file such that the length isn't sufficient to have an EOF
  // record.
  int kTruncationBytes = -implicit_cast<int>(sizeof(disk_cache::SimpleFileEOF));
  const base::FilePath entry_path = cache_path().AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
  const int64 invalid_size =
      disk_cache::simple_util::GetFileSizeFromKeyAndDataSize(key,
                                                             kTruncationBytes);
  EXPECT_TRUE(TruncatePath(entry_path, invalid_size));
  EXPECT_EQ(net::ERR_FAILED, OpenEntry(key, &entry));
}
| 273 |
// In APP_CACHE mode no operation is optimistic; the waiting test helpers
// must still complete a plain Create/Write/Read sequence.
TEST_P(DiskCacheSimpleEntryTest, NonOptimisticOperationsBasic) {
  // Test sequence:
  // Create, Write, Read, Close.

  // APP_CACHE doesn't use optimistic operations.
  TEST_DISABLED_IF(cache_type() != net::APP_CACHE);
  InitCache();
  disk_cache::Entry* const null_entry = NULL;

  disk_cache::Entry* entry = NULL;
  EXPECT_EQ(net::OK, CreateEntry("my key", &entry));
  ASSERT_NE(null_entry, entry);
  ScopedEntryPtr entry_closer(entry);

  const int kBufferSize = 10;
  scoped_refptr<net::IOBufferWithSize> write_buffer(
      new net::IOBufferWithSize(kBufferSize));
  CacheTestFillBuffer(write_buffer->data(), write_buffer->size(), false);
  EXPECT_EQ(
      write_buffer->size(),
      WriteData(entry, 1, 0, write_buffer.get(), write_buffer->size(), false));

  scoped_refptr<net::IOBufferWithSize> read_buffer(
      new net::IOBufferWithSize(kBufferSize));
  EXPECT_EQ(read_buffer->size(),
            ReadData(entry, 1, 0, read_buffer.get(), read_buffer->size()));
}
| 301 |
| 302 TEST_P(DiskCacheSimpleEntryTest, NonOptimisticOperationsDontBlock) { |
| 303 // Test sequence: |
| 304 // Create, Write, Close. |
| 305 |
| 306 // APP_CACHE doesn't use optimistic operations. |
| 307 TEST_DISABLED_IF(cache_type() != net::APP_CACHE); |
| 308 InitCache(); |
| 309 disk_cache::Entry* const null_entry = NULL; |
| 310 |
| 311 MessageLoopHelper helper; |
| 312 CallbackTest create_callback(&helper, false); |
| 313 |
| 314 int expected_callback_runs = 0; |
| 315 const int kBufferSize = 10; |
| 316 scoped_refptr<net::IOBufferWithSize> write_buffer( |
| 317 new net::IOBufferWithSize(kBufferSize)); |
| 318 |
| 319 disk_cache::Entry* entry = NULL; |
| 320 EXPECT_EQ(net::OK, CreateEntry("my key", &entry)); |
| 321 ASSERT_NE(null_entry, entry); |
| 322 ScopedEntryPtr entry_closer(entry); |
| 323 |
| 324 CacheTestFillBuffer(write_buffer->data(), write_buffer->size(), false); |
| 325 CallbackTest write_callback(&helper, false); |
| 326 int ret = entry->WriteData( |
| 327 1, |
| 328 0, |
| 329 write_buffer.get(), |
| 330 write_buffer->size(), |
| 331 base::Bind(&CallbackTest::Run, base::Unretained(&write_callback)), |
| 332 false); |
| 333 ASSERT_EQ(net::ERR_IO_PENDING, ret); |
| 334 helper.WaitUntilCacheIoFinished(++expected_callback_runs); |
| 335 } |
| 336 |
// Queues a Write and a Read back-to-back without waiting in between; both
// must complete asynchronously and the read must see the written bytes.
TEST_P(DiskCacheSimpleEntryTest, NonOptimisticOperationsBasicsWithoutWaiting) {
  // Test sequence:
  // Create, Write, Read, Close.

  // APP_CACHE doesn't use optimistic operations.
  TEST_DISABLED_IF(cache_type() != net::APP_CACHE);
  InitCache();
  disk_cache::Entry* const null_entry = NULL;
  MessageLoopHelper helper;

  disk_cache::Entry* entry = NULL;
  // Note that |entry| is only set once CreateEntry() completed which is why we
  // have to wait (i.e. use the helper CreateEntry() function).
  EXPECT_EQ(net::OK, CreateEntry("my key", &entry));
  ASSERT_NE(null_entry, entry);
  ScopedEntryPtr entry_closer(entry);

  const int kBufferSize = 10;
  scoped_refptr<net::IOBufferWithSize> write_buffer(
      new net::IOBufferWithSize(kBufferSize));
  CacheTestFillBuffer(write_buffer->data(), write_buffer->size(), false);
  CallbackTest write_callback(&helper, false);
  int ret = entry->WriteData(
      1,
      0,
      write_buffer.get(),
      write_buffer->size(),
      base::Bind(&CallbackTest::Run, base::Unretained(&write_callback)),
      false);
  EXPECT_EQ(net::ERR_IO_PENDING, ret);
  int expected_callback_runs = 1;

  // Issue the read immediately, before the write's callback has run.
  scoped_refptr<net::IOBufferWithSize> read_buffer(
      new net::IOBufferWithSize(kBufferSize));
  CallbackTest read_callback(&helper, false);
  ret = entry->ReadData(
      1,
      0,
      read_buffer.get(),
      read_buffer->size(),
      base::Bind(&CallbackTest::Run, base::Unretained(&read_callback)));
  EXPECT_EQ(net::ERR_IO_PENDING, ret);
  ++expected_callback_runs;

  helper.WaitUntilCacheIoFinished(expected_callback_runs);
  ASSERT_EQ(read_buffer->size(), write_buffer->size());
  EXPECT_EQ(
      0,
      memcmp(read_buffer->data(), write_buffer->data(), read_buffer->size()));
}
| 387 |
// Exercises the optimistic operation queue: Create always returns OK
// synchronously, Writes are optimistic when the pending-operation queue is
// empty, and Reads always complete asynchronously.
TEST_P(DiskCacheSimpleEntryTest, Optimistic) {
  // Test sequence:
  // Create, Write, Read, Write, Read, Close.

  // Only net::DISK_CACHE runs optimistic operations.
  TEST_DISABLED_IF(cache_type() != net::DISK_CACHE);
  InitCache();
  disk_cache::Entry* null = NULL;
  const char key[] = "the first key";

  MessageLoopHelper helper;
  CallbackTest callback1(&helper, false);
  CallbackTest callback2(&helper, false);
  CallbackTest callback3(&helper, false);
  CallbackTest callback4(&helper, false);
  CallbackTest callback5(&helper, false);

  // |expected| counts the callbacks that should actually fire (i.e. the
  // operations that returned ERR_IO_PENDING).
  int expected = 0;
  const int kSize1 = 10;
  const int kSize2 = 20;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
  scoped_refptr<net::IOBuffer> buffer1_read(new net::IOBuffer(kSize1));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
  scoped_refptr<net::IOBuffer> buffer2_read(new net::IOBuffer(kSize2));
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  CacheTestFillBuffer(buffer2->data(), kSize2, false);

  disk_cache::Entry* entry = NULL;
  // Create is optimistic, must return OK.
  ASSERT_EQ(net::OK,
            cache()->CreateEntry(key, &entry,
                                 base::Bind(&CallbackTest::Run,
                                            base::Unretained(&callback1))));
  EXPECT_NE(null, entry);
  ScopedEntryPtr entry_closer(entry);

  // This write may or may not be optimistic (it depends if the previous
  // optimistic create already finished by the time we call the write here).
  int ret = entry->WriteData(
      1,
      0,
      buffer1.get(),
      kSize1,
      base::Bind(&CallbackTest::Run, base::Unretained(&callback2)),
      false);
  EXPECT_TRUE(kSize1 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  // This Read must not be optimistic, since we don't support that yet.
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->ReadData(
                1,
                0,
                buffer1_read.get(),
                kSize1,
                base::Bind(&CallbackTest::Run, base::Unretained(&callback3))));
  expected++;
  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read->data(), kSize1));

  // At this point after waiting, the pending operations queue on the entry
  // should be empty, so the next Write operation must run as optimistic.
  EXPECT_EQ(kSize2,
            entry->WriteData(
                1,
                0,
                buffer2.get(),
                kSize2,
                base::Bind(&CallbackTest::Run, base::Unretained(&callback4)),
                false));

  // Lets do another read so we block until both the write and the read
  // operation finishes and we can then test for HasOneRef() below.
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->ReadData(
                1,
                0,
                buffer2_read.get(),
                kSize2,
                base::Bind(&CallbackTest::Run, base::Unretained(&callback5))));
  expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_EQ(0, memcmp(buffer2->data(), buffer2_read->data(), kSize2));

  // Check that we are not leaking.
  EXPECT_NE(entry, null);
  EXPECT_TRUE(
      static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
}
| 479 |
// Opening a key while its optimistic Create may still be in flight must
// yield the very same entry object.
TEST_P(DiskCacheSimpleEntryTest, Optimistic2) {
  // Test sequence:
  // Create, Open, Close, Close.

  // Only net::DISK_CACHE runs optimistic operations.
  TEST_DISABLED_IF(cache_type() != net::DISK_CACHE);
  InitCache();
  disk_cache::Entry* null = NULL;
  const char key[] = "the first key";

  MessageLoopHelper helper;
  CallbackTest callback1(&helper, false);
  CallbackTest callback2(&helper, false);

  disk_cache::Entry* entry = NULL;
  // Optimistic create returns OK synchronously.
  ASSERT_EQ(net::OK,
            cache()->CreateEntry(key, &entry,
                                 base::Bind(&CallbackTest::Run,
                                            base::Unretained(&callback1))));
  EXPECT_NE(null, entry);
  ScopedEntryPtr entry_closer(entry);

  disk_cache::Entry* entry2 = NULL;
  ASSERT_EQ(net::ERR_IO_PENDING,
            cache()->OpenEntry(key, &entry2,
                               base::Bind(&CallbackTest::Run,
                                          base::Unretained(&callback2))));
  ASSERT_TRUE(helper.WaitUntilCacheIoFinished(1));

  EXPECT_NE(null, entry2);
  EXPECT_EQ(entry, entry2);

  // We have to call close twice, since we called create and open above.
  entry->Close();

  // Check that we are not leaking.
  EXPECT_TRUE(
      static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
}
| 519 |
| 520 TEST_P(DiskCacheSimpleEntryTest, Optimistic3) { |
| 521 // Test sequence: |
| 522 // Create, Close, Open, Close. |
| 523 InitCache(); |
| 524 disk_cache::Entry* null = NULL; |
| 525 const char key[] = "the first key"; |
| 526 |
| 527 disk_cache::Entry* entry = NULL; |
| 528 ASSERT_EQ(net::OK, |
| 529 cache()->CreateEntry(key, &entry, net::CompletionCallback())); |
| 530 EXPECT_NE(null, entry); |
| 531 entry->Close(); |
| 532 |
| 533 net::TestCompletionCallback cb; |
| 534 disk_cache::Entry* entry2 = NULL; |
| 535 ASSERT_EQ(net::ERR_IO_PENDING, |
| 536 cache()->OpenEntry(key, &entry2, cb.callback())); |
| 537 ASSERT_EQ(net::OK, cb.GetResult(net::ERR_IO_PENDING)); |
| 538 ScopedEntryPtr entry_closer(entry2); |
| 539 |
| 540 EXPECT_NE(null, entry2); |
| 541 EXPECT_EQ(entry, entry2); |
| 542 |
| 543 // Check that we are not leaking. |
| 544 EXPECT_TRUE( |
| 545 static_cast<disk_cache::SimpleEntryImpl*>(entry2)->HasOneRef()); |
| 546 } |
| 547 |
// Writing to a closed entry fails; once the entry object is destroyed, the
// key can be re-opened and the fresh entry accepts optimistic writes.
TEST_P(DiskCacheSimpleEntryTest, Optimistic4) {
  // Test sequence:
  // Create, Close, Write, Open, Open, Close, Write, Read, Close.

  // Only net::DISK_CACHE runs optimistic operations.
  TEST_DISABLED_IF(cache_type() != net::DISK_CACHE);
  InitCache();
  disk_cache::Entry* null = NULL;
  const char key[] = "the first key";

  net::TestCompletionCallback cb;
  const int kSize1 = 10;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  disk_cache::Entry* entry = NULL;

  ASSERT_EQ(net::OK,
            cache()->CreateEntry(key, &entry, net::CompletionCallback()));
  EXPECT_NE(null, entry);
  entry->Close();

  // Lets do a Write so we block until both the Close and the Write
  // operation finishes. Write must fail since we are writing in a closed entry.
  EXPECT_EQ(
      net::ERR_IO_PENDING,
      entry->WriteData(1, 0, buffer1.get(), kSize1, cb.callback(), false));
  EXPECT_EQ(net::ERR_FAILED, cb.GetResult(net::ERR_IO_PENDING));

  // Finish running the pending tasks so that we fully complete the close
  // operation and destroy the entry object.
  base::MessageLoop::current()->RunUntilIdle();

  // At this point the |entry| must have been destroyed, and called
  // RemoveSelfFromBackend().
  disk_cache::Entry* entry2 = NULL;
  ASSERT_EQ(net::ERR_IO_PENDING,
            cache()->OpenEntry(key, &entry2, cb.callback()));
  ASSERT_EQ(net::OK, cb.GetResult(net::ERR_IO_PENDING));
  EXPECT_NE(null, entry2);

  // A second concurrent Open must return the same entry object.
  disk_cache::Entry* entry3 = NULL;
  ASSERT_EQ(net::ERR_IO_PENDING,
            cache()->OpenEntry(key, &entry3, cb.callback()));
  ASSERT_EQ(net::OK, cb.GetResult(net::ERR_IO_PENDING));
  EXPECT_NE(null, entry3);
  EXPECT_EQ(entry2, entry3);
  entry3->Close();

  // The previous Close doesn't actually closes the entry since we opened it
  // twice, so the next Write operation must succeed and it must be able to
  // perform it optimistically, since there is no operation running on this
  // entry.
  EXPECT_EQ(kSize1,
            entry2->WriteData(
                1, 0, buffer1.get(), kSize1, net::CompletionCallback(), false));

  // Lets do another read so we block until both the write and the read
  // operation finishes and we can then test for HasOneRef() below.
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry2->ReadData(1, 0, buffer1.get(), kSize1, cb.callback()));
  EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));

  // Check that we are not leaking.
  EXPECT_TRUE(
      static_cast<disk_cache::SimpleEntryImpl*>(entry2)->HasOneRef());
  entry2->Close();
}
| 615 |
// A doomed-but-still-open entry must keep accepting Writes and Reads.
TEST_P(DiskCacheSimpleEntryTest, Optimistic5) {
  // Test sequence:
  // Create, Doom, Write, Read, Close.

  // Only net::DISK_CACHE runs optimistic operations.
  TEST_DISABLED_IF(cache_type() != net::DISK_CACHE);
  InitCache();
  disk_cache::Entry* null = NULL;
  const char key[] = "the first key";

  net::TestCompletionCallback cb;
  const int kSize1 = 10;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  disk_cache::Entry* entry = NULL;

  ASSERT_EQ(net::OK,
            cache()->CreateEntry(key, &entry, net::CompletionCallback()));
  EXPECT_NE(null, entry);
  ScopedEntryPtr entry_closer(entry);
  entry->Doom();

  // IO on the doomed entry must still succeed.
  EXPECT_EQ(
      net::ERR_IO_PENDING,
      entry->WriteData(1, 0, buffer1.get(), kSize1, cb.callback(), false));
  EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));

  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->ReadData(1, 0, buffer1.get(), kSize1, cb.callback()));
  EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));

  // Check that we are not leaking.
  EXPECT_TRUE(
      static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
}
| 651 |
// Repeated Doom() calls on the same entry must be harmless and must not
// break subsequent Reads.
TEST_P(DiskCacheSimpleEntryTest, Optimistic6) {
  // Test sequence:
  // Create, Write, Doom, Doom, Read, Doom, Close.

  // Only net::DISK_CACHE runs optimistic operations.
  TEST_DISABLED_IF(cache_type() != net::DISK_CACHE);
  InitCache();
  disk_cache::Entry* null = NULL;
  const char key[] = "the first key";

  net::TestCompletionCallback cb;
  const int kSize1 = 10;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
  scoped_refptr<net::IOBuffer> buffer1_read(new net::IOBuffer(kSize1));
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  disk_cache::Entry* entry = NULL;

  ASSERT_EQ(net::OK,
            cache()->CreateEntry(key, &entry, net::CompletionCallback()));
  EXPECT_NE(null, entry);
  ScopedEntryPtr entry_closer(entry);

  EXPECT_EQ(
      net::ERR_IO_PENDING,
      entry->WriteData(1, 0, buffer1.get(), kSize1, cb.callback(), false));
  EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));

  entry->Doom();
  entry->Doom();

  // This Read must not be optimistic, since we don't support that yet.
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->ReadData(1, 0, buffer1_read.get(), kSize1, cb.callback()));
  EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));
  EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read->data(), kSize1));

  entry->Doom();
}
| 690 |
// Confirm that IO buffers are not referenced by the Simple Cache after a write
// completes.
TEST_P(DiskCacheSimpleEntryTest, OptimisticWriteReleases) {
  // Only net::DISK_CACHE runs optimistic operations.
  TEST_DISABLED_IF(cache_type() != net::DISK_CACHE);

  InitCache();

  const char key[] = "the first key";
  disk_cache::Entry* entry = NULL;

  // First, an optimistic create.
  ASSERT_EQ(net::OK,
            cache()->CreateEntry(key, &entry, net::CompletionCallback()));
  ASSERT_TRUE(entry);
  ScopedEntryPtr entry_closer(entry);

  const int kWriteSize = 512;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kWriteSize));
  EXPECT_TRUE(buffer1->HasOneRef());
  CacheTestFillBuffer(buffer1->data(), kWriteSize, false);

  // An optimistic write happens only when there is an empty queue of pending
  // operations. To ensure the queue is empty, we issue a write and wait until
  // it completes.
  EXPECT_EQ(kWriteSize,
            WriteData(entry, 1, 0, buffer1.get(), kWriteSize, false));
  EXPECT_TRUE(buffer1->HasOneRef());

  // Finally, we should perform an optimistic write and confirm that all
  // references to the IO buffer have been released.
  EXPECT_EQ(
      kWriteSize,
      entry->WriteData(
          1, 0, buffer1.get(), kWriteSize, net::CompletionCallback(), false));
  EXPECT_TRUE(buffer1->HasOneRef());
}
| 728 |
// A Doom racing a fresh optimistic Create must win: after closing, no entry
// files remain on disk.
// NOTE(review): this test relies on the optimistic Create/Write returning
// synchronously, yet unlike the Optimistic* tests it has no
// TEST_DISABLED_IF(cache_type() != net::DISK_CACHE) guard — confirm whether
// one is intended.
TEST_P(DiskCacheSimpleEntryTest, CreateDoomRace) {
  // Test sequence:
  // Create, Doom, Write, Close, Check files are not on disk anymore.
  InitCache();
  disk_cache::Entry* null = NULL;
  const char key[] = "the first key";

  net::TestCompletionCallback cb;
  const int kSize1 = 10;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  disk_cache::Entry* entry = NULL;

  ASSERT_EQ(net::OK,
            cache()->CreateEntry(key, &entry, net::CompletionCallback()));
  EXPECT_NE(null, entry);

  EXPECT_EQ(net::ERR_IO_PENDING, cache()->DoomEntry(key, cb.callback()));
  EXPECT_EQ(net::OK, cb.GetResult(net::ERR_IO_PENDING));

  // Writing to the doomed entry still succeeds (optimistically).
  EXPECT_EQ(
      kSize1,
      entry->WriteData(0, 0, buffer1.get(), kSize1, cb.callback(), false));

  entry->Close();

  // Finish running the pending tasks so that we fully complete the close
  // operation and destroy the entry object.
  base::MessageLoop::current()->RunUntilIdle();

  // After the doomed entry is gone, none of its backing files may exist.
  for (int i = 0; i < disk_cache::kSimpleEntryFileCount; ++i) {
    base::FilePath entry_file_path = cache_path().AppendASCII(
        disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, i));
    base::File::Info info;
    EXPECT_FALSE(base::GetFileInfo(entry_file_path, &info));
  }
}
| 766 |
// A Create issued while a Doom of the same key is still pending must
// succeed, and the Doom must still complete.
TEST_P(DiskCacheSimpleEntryTest, DoomCreateRace) {
  // This test runs as APP_CACHE to make operations more synchronous. Test
  // sequence:
  // Create, Doom, Create.
  TEST_DISABLED_IF(cache_type() != net::APP_CACHE);
  InitCache();
  disk_cache::Entry* null = NULL;
  const char key[] = "the first key";

  net::TestCompletionCallback create_callback;

  disk_cache::Entry* entry1 = NULL;
  ASSERT_EQ(net::OK,
            create_callback.GetResult(
                cache()->CreateEntry(key, &entry1, create_callback.callback())));
  ScopedEntryPtr entry1_closer(entry1);
  EXPECT_NE(null, entry1);

  net::TestCompletionCallback doom_callback;
  EXPECT_EQ(net::ERR_IO_PENDING,
            cache()->DoomEntry(key, doom_callback.callback()));

  // Create the replacement while the doom is still in flight.
  disk_cache::Entry* entry2 = NULL;
  ASSERT_EQ(net::OK,
            create_callback.GetResult(
                cache()->CreateEntry(key, &entry2, create_callback.callback())));
  ScopedEntryPtr entry2_closer(entry2);
  EXPECT_EQ(net::OK, doom_callback.GetResult(net::ERR_IO_PENDING));
}
| 796 |
// Redundantly dooming an already-doomed first entry must not delete the
// replacement entry created under the same key.
TEST_P(DiskCacheSimpleEntryTest, DoomDoom) {
  // Test sequence:
  // Create, Doom, Create, Doom (1st entry), Open.
  InitCache();
  disk_cache::Entry* null = NULL;

  const char key[] = "the first key";

  disk_cache::Entry* entry1 = NULL;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry1));
  ScopedEntryPtr entry1_closer(entry1);
  EXPECT_NE(null, entry1);

  EXPECT_EQ(net::OK, DoomEntry(key));

  disk_cache::Entry* entry2 = NULL;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry2));
  ScopedEntryPtr entry2_closer(entry2);
  EXPECT_NE(null, entry2);

  // Redundantly dooming entry1 should not delete entry2.
  disk_cache::SimpleEntryImpl* simple_entry1 =
      static_cast<disk_cache::SimpleEntryImpl*>(entry1);
  net::TestCompletionCallback cb;
  EXPECT_EQ(net::OK,
            cb.GetResult(simple_entry1->DoomEntry(cb.callback())));

  // entry2 must still be openable.
  disk_cache::Entry* entry3 = NULL;
  ASSERT_EQ(net::OK, OpenEntry(key, &entry3));
  ScopedEntryPtr entry3_closer(entry3);
  EXPECT_NE(null, entry3);
}
| 829 |
| 830 TEST_P(DiskCacheSimpleEntryTest, DoomCreateDoom) { |
| 831 // Test sequence: |
| 832 // Create, Doom, Create, Doom. |
| 833 InitCache(); |
| 834 |
| 835 disk_cache::Entry* null = NULL; |
| 836 |
| 837 const char key[] = "the first key"; |
| 838 |
| 839 disk_cache::Entry* entry1 = NULL; |
| 840 ASSERT_EQ(net::OK, CreateEntry(key, &entry1)); |
| 841 ScopedEntryPtr entry1_closer(entry1); |
| 842 EXPECT_NE(null, entry1); |
| 843 |
| 844 entry1->Doom(); |
| 845 |
| 846 disk_cache::Entry* entry2 = NULL; |
| 847 ASSERT_EQ(net::OK, CreateEntry(key, &entry2)); |
| 848 ScopedEntryPtr entry2_closer(entry2); |
| 849 EXPECT_NE(null, entry2); |
| 850 |
| 851 entry2->Doom(); |
| 852 |
| 853 // This test passes if it doesn't crash. |
| 854 } |
| 855 |
| 856 // Checks that an optimistic Create would fail later on a racing Open. |
| 857 TEST_P(DiskCacheSimpleEntryTest, OptimisticCreateFailsOnOpen) { |
| 858 InitCache(); |
| 859 |
| 860 // Create a corrupt file in place of a future entry. Optimistic create should |
| 861 // initially succeed, but realize later that creation failed. |
| 862 const std::string key = "the key"; |
| 863 net::TestCompletionCallback cb; |
| 864 disk_cache::Entry* entry = NULL; |
| 865 disk_cache::Entry* entry2 = NULL; |
| 866 |
| 867 EXPECT_TRUE(disk_cache::simple_util::CreateCorruptFileForTests( |
| 868 key, cache_path())); |
| 869 EXPECT_EQ(net::OK, cache()->CreateEntry(key, &entry, cb.callback())); |
| 870 ASSERT_TRUE(entry); |
| 871 ScopedEntryPtr entry_closer(entry); |
| 872 ASSERT_NE(net::OK, OpenEntry(key, &entry2)); |
| 873 |
| 874 // Check that we are not leaking. |
| 875 EXPECT_TRUE( |
| 876 static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef()); |
| 877 } |
| 878 |
// Tests that old entries are evicted while new entries remain in the index.
// This test relies on non-mandatory properties of the simple Cache Backend:
// LRU eviction, specific values of high-watermark and low-watermark etc.
// When changing the eviction algorithm, the test will have to be re-engineered.
TEST_P(DiskCacheSimpleEntryTest, EvictOldEntries) {
  const int kMaxSize = 200 * 1024;
  const int kWriteSize = kMaxSize / 10;  // Each entry fills 1/10 of the cache.
  const int kNumExtraEntries = 12;  // 12 * kWriteSize overflows kMaxSize.
  SetMaxSize(kMaxSize);
  InitCache();

  // Write the entry that is expected to be evicted (it ends up the oldest).
  std::string key1("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key1, &entry));
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kWriteSize));
  CacheTestFillBuffer(buffer->data(), kWriteSize, false);
  EXPECT_EQ(kWriteSize,
            WriteData(entry, 1, 0, buffer.get(), kWriteSize, false));
  entry->Close();
  // Ensure the entries created below get strictly later timestamps.
  AddDelay();

  // Flood the cache with fresh entries to overflow the maximum size.
  std::string key2("the key prefix");
  for (int i = 0; i < kNumExtraEntries; i++) {
    ASSERT_EQ(net::OK, CreateEntry(key2 + base::StringPrintf("%d", i), &entry));
    ScopedEntryPtr entry_closer(entry);
    EXPECT_EQ(kWriteSize,
              WriteData(entry, 1, 0, buffer.get(), kWriteSize, false));
  }

  // TODO(pasko): Find a way to wait for the eviction task(s) to finish by using
  // the internal knowledge about |SimpleBackendImpl|.
  ASSERT_NE(net::OK, OpenEntry(key1, &entry))
      << "Should have evicted the old entry";
  for (int i = 0; i < 2; i++) {
    int entry_no = kNumExtraEntries - i - 1;
    // Generally there is no guarantee that at this point the background
    // eviction is finished. We are testing the positive case, i.e. when the
    // eviction never reaches this entry, should be non-flaky.
    ASSERT_EQ(net::OK, OpenEntry(key2 + base::StringPrintf("%d", entry_no),
                                 &entry))
        << "Should not have evicted fresh entry " << entry_no;
    entry->Close();
  }
}
| 923 |
// Tests that if a read and a following in-flight truncate are both in progress
// simultaneously that they both can occur successfully. See
// http://crbug.com/239223
TEST_P(DiskCacheSimpleEntryTest, InFlightTruncate) {
  InitCache();

  const char key[] = "the first key";

  // Populate stream 1 with kBufferSize bytes of data.
  const int kBufferSize = 1024;
  scoped_refptr<net::IOBuffer> write_buffer(new net::IOBuffer(kBufferSize));
  CacheTestFillBuffer(write_buffer->data(), kBufferSize, false);

  disk_cache::Entry* entry = NULL;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  EXPECT_EQ(kBufferSize,
            WriteData(entry, 1, 0, write_buffer.get(), kBufferSize, false));
  entry->Close();
  entry = NULL;

  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  ScopedEntryPtr entry_closer(entry);

  MessageLoopHelper helper;
  int expected = 0;  // Number of async cache operations to wait for.

  // Make a short read.
  const int kReadBufferSize = 512;
  scoped_refptr<net::IOBuffer> read_buffer(new net::IOBuffer(kReadBufferSize));
  CallbackTest read_callback(&helper, false);
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->ReadData(1,
                            0,
                            read_buffer.get(),
                            kReadBufferSize,
                            base::Bind(&CallbackTest::Run,
                                       base::Unretained(&read_callback))));
  ++expected;

  // Truncate the entry to the length of that read.
  // Issued while the read above is still pending, to exercise the in-flight
  // combination from the bug report.
  scoped_refptr<net::IOBuffer>
      truncate_buffer(new net::IOBuffer(kReadBufferSize));
  CacheTestFillBuffer(truncate_buffer->data(), kReadBufferSize, false);
  CallbackTest truncate_callback(&helper, false);
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->WriteData(1,
                             0,
                             truncate_buffer.get(),
                             kReadBufferSize,
                             base::Bind(&CallbackTest::Run,
                                        base::Unretained(&truncate_callback)),
                             true));
  ++expected;

  // Wait for both the read and truncation to finish, and confirm that both
  // succeeded.
  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_EQ(kReadBufferSize, read_callback.last_result());
  EXPECT_EQ(kReadBufferSize, truncate_callback.last_result());
  EXPECT_EQ(0,
            memcmp(write_buffer->data(), read_buffer->data(), kReadBufferSize));
}
| 986 |
// Tests that if a write and a read dependent on it are both in flight
// simultaneously that they both can complete successfully without erroneous
// early returns. See http://crbug.com/239223
TEST_P(DiskCacheSimpleEntryTest, InFlightRead) {
  InitCache();

  const char key[] = "the first key";
  disk_cache::Entry* entry = NULL;
  // Null callback: the create is expected to complete synchronously (the
  // ASSERT below checks for net::OK immediately).
  ASSERT_EQ(net::OK,
            cache()->CreateEntry(key, &entry, net::CompletionCallback()));
  ScopedEntryPtr entry_closer(entry);

  const int kBufferSize = 1024;
  scoped_refptr<net::IOBuffer> write_buffer(new net::IOBuffer(kBufferSize));
  CacheTestFillBuffer(write_buffer->data(), kBufferSize, false);

  MessageLoopHelper helper;
  int expected = 0;  // Number of async cache operations to wait for.

  CallbackTest write_callback(&helper, false);
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->WriteData(1,
                             0,
                             write_buffer.get(),
                             kBufferSize,
                             base::Bind(&CallbackTest::Run,
                                        base::Unretained(&write_callback)),
                             true));
  ++expected;

  // Issue the read while the write above is still in flight; once both
  // complete it must observe the written data.
  scoped_refptr<net::IOBuffer> read_buffer(new net::IOBuffer(kBufferSize));
  CallbackTest read_callback(&helper, false);
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->ReadData(1,
                            0,
                            read_buffer.get(),
                            kBufferSize,
                            base::Bind(&CallbackTest::Run,
                                       base::Unretained(&read_callback))));
  ++expected;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_EQ(kBufferSize, write_callback.last_result());
  EXPECT_EQ(kBufferSize, read_callback.last_result());
  EXPECT_EQ(0, memcmp(write_buffer->data(), read_buffer->data(), kBufferSize));
}
| 1033 |
TEST_P(DiskCacheSimpleEntryTest, OpenCreateRaceWithNoIndex) {
  // Issue Open and Create for the same key before the index has finished
  // loading: the Open must fail (no such entry yet) and the Create succeed.
  SimpleCacheCreateBackendExtraData extra_data;
  extra_data.set_do_not_wait_for_index(true);
  DisableIntegrityCheck();
  InitCacheWithExtraData(&extra_data);

  // Assume the index is not initialized, which is likely, since we are blocking
  // the IO thread from executing the index finalization step.
  disk_cache::Entry* entry1;
  net::TestCompletionCallback cb1;
  disk_cache::Entry* entry2;
  net::TestCompletionCallback cb2;
  int rv1 = cache()->OpenEntry("key", &entry1, cb1.callback());
  int rv2 = cache()->CreateEntry("key", &entry2, cb2.callback());

  EXPECT_EQ(net::ERR_FAILED, cb1.GetResult(rv1));
  ASSERT_EQ(net::OK, cb2.GetResult(rv2));
  entry2->Close();
}
| 1053 |
// Checks that reading two entries simultaneously does not discard a CRC check.
// TODO(pasko): make it work with Simple Cache.
TEST_P(DiskCacheSimpleEntryTest, DISABLED_SimpleCacheMultipleReadersCheckCRC) {
  InitCache();

  const char key[] = "key";

  // Create an entry whose stored checksum is known to be wrong.
  int size;
  ASSERT_TRUE(MakeBadChecksumEntry(key, this, &size));

  scoped_refptr<net::IOBuffer> read_buffer1(new net::IOBuffer(size));
  scoped_refptr<net::IOBuffer> read_buffer2(new net::IOBuffer(size));

  // Advance the first reader a little.
  disk_cache::Entry* entry = NULL;
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(1, ReadData(entry, 0, 0, read_buffer1.get(), 1));

  // Make the second reader pass the point where the first one is, and close.
  disk_cache::Entry* entry2 = NULL;
  EXPECT_EQ(net::OK, OpenEntry(key, &entry2));
  EXPECT_EQ(1, ReadData(entry2, 0, 0, read_buffer2.get(), 1));
  EXPECT_EQ(1, ReadData(entry2, 0, 1, read_buffer2.get(), 1));
  entry2->Close();

  // Read the data till the end should produce an error: the bad checksum must
  // be detected even though another reader was interleaved with this one.
  EXPECT_GT(0, ReadData(entry, 0, 1, read_buffer1.get(), size));
  entry->Close();
  DisableIntegrityCheck();
}
| 1084 |
// Checking one more scenario of overlapped reading of a bad entry.
// Differs from the |SimpleCacheMultipleReadersCheckCRC| only by the order of
// last two reads.
TEST_P(DiskCacheSimpleEntryTest, MultipleReadersCheckCRC2) {
  InitCache();

  const char key[] = "key";
  // Create an entry whose stored checksum is known to be wrong.
  int size;
  ASSERT_TRUE(MakeBadChecksumEntry(key, this, &size));

  scoped_refptr<net::IOBuffer> read_buffer1(new net::IOBuffer(size));
  scoped_refptr<net::IOBuffer> read_buffer2(new net::IOBuffer(size));

  // Advance the first reader a little.
  disk_cache::Entry* entry = NULL;
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  ScopedEntryPtr entry_closer(entry);
  EXPECT_EQ(1, ReadData(entry, 1, 0, read_buffer1.get(), 1));

  // Advance the 2nd reader by the same amount.
  disk_cache::Entry* entry2 = NULL;
  EXPECT_EQ(net::OK, OpenEntry(key, &entry2));
  ScopedEntryPtr entry2_closer(entry2);
  EXPECT_EQ(1, ReadData(entry2, 1, 0, read_buffer2.get(), 1));

  // Continue reading 1st. Reading to the end must hit the bad checksum.
  EXPECT_GT(0, ReadData(entry, 1, 1, read_buffer1.get(), size));

  // This read should fail as well because we have previous read failures.
  EXPECT_GT(0, ReadData(entry2, 1, 1, read_buffer2.get(), 1));
  DisableIntegrityCheck();
}
| 1117 |
// Test if we can sequentially read each subset of the data until all the data
// is read, then the CRC is calculated correctly and the reads are successful.
TEST_P(DiskCacheSimpleEntryTest, ReadCombineCRC) {
  // Test sequence:
  // Create, Write, Read (first half of data), Read (second half of data),
  // Close.
  InitCache();
  disk_cache::Entry* null = NULL;
  const char key[] = "the first key";

  const int kHalfSize = 200;
  const int kSize = 2 * kHalfSize;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer1->data(), kSize, false);
  disk_cache::Entry* entry = NULL;

  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_NE(null, entry);

  EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer1.get(), kSize, false));
  entry->Close();

  disk_cache::Entry* entry2 = NULL;
  ASSERT_EQ(net::OK, OpenEntry(key, &entry2));
  // NOTE(review): pointer equality implies the backend hands back the same
  // entry object even though it was closed above — presumably it is still
  // alive internally; confirm this is intended Simple Cache behavior.
  EXPECT_EQ(entry, entry2);

  // Read the first half of the data.
  int offset = 0;
  int buf_len = kHalfSize;
  scoped_refptr<net::IOBuffer> buffer1_read1(new net::IOBuffer(buf_len));
  EXPECT_EQ(buf_len, ReadData(entry2, 1, offset, buffer1_read1.get(), buf_len));
  EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read1->data(), buf_len));

  // Read the second half of the data.
  offset = buf_len;
  buf_len = kHalfSize;
  scoped_refptr<net::IOBuffer> buffer1_read2(new net::IOBuffer(buf_len));
  EXPECT_EQ(buf_len, ReadData(entry2, 1, offset, buffer1_read2.get(), buf_len));
  char* buffer1_data = buffer1->data() + offset;
  EXPECT_EQ(0, memcmp(buffer1_data, buffer1_read2->data(), buf_len));

  // Check that we are not leaking.
  EXPECT_NE(entry, null);
  EXPECT_TRUE(
      static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
  entry->Close();
  entry = NULL;
}
| 1166 |
// Test if we can write the data not in sequence and read correctly. In
// this case the CRC will not be present.
TEST_P(DiskCacheSimpleEntryTest, NonSequentialWrite) {
  // Test sequence:
  // Create, Write (second half of data), Write (first half of data), Read,
  // Close.
  InitCache();
  disk_cache::Entry* null = NULL;
  const char key[] = "the first key";

  const int kHalfSize = 200;
  const int kSize = 2 * kHalfSize;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer1->data(), kSize, false);
  // |buffer2| holds a copy of the second half of |buffer1|'s data.
  char* buffer1_data = buffer1->data() + kHalfSize;
  memcpy(buffer2->data(), buffer1_data, kHalfSize);
  disk_cache::Entry* entry = NULL;

  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_NE(null, entry);

  // Write the second half first, then the first half (out of order).
  int offset = kHalfSize;
  int buf_len = kHalfSize;

  EXPECT_EQ(buf_len,
            WriteData(entry, 0, offset, buffer2.get(), buf_len, false));
  offset = 0;
  buf_len = kHalfSize;
  EXPECT_EQ(buf_len,
            WriteData(entry, 0, offset, buffer1.get(), buf_len, false));
  entry->Close();

  disk_cache::Entry* entry2 = NULL;
  ASSERT_EQ(net::OK, OpenEntry(key, &entry2));
  // NOTE(review): pointer equality implies the backend hands back the same
  // entry object after the Close above — confirm this is intended behavior.
  EXPECT_EQ(entry, entry2);

  // The full read must reassemble the out-of-order writes correctly.
  scoped_refptr<net::IOBuffer> buffer1_read1(new net::IOBuffer(kSize));
  EXPECT_EQ(kSize, ReadData(entry2, 0, 0, buffer1_read1.get(), kSize));
  EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read1->data(), kSize));

  // Check that we are not leaking.
  ASSERT_NE(entry, null);
  EXPECT_TRUE(
      static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
  entry->Close();
  entry = NULL;
}
| 1215 |
| 1216 // Test that changing stream1 size does not affect stream0 (stream0 and stream1 |
| 1217 // are stored in the same file in Simple Cache). |
| 1218 TEST_P(DiskCacheSimpleEntryTest, Stream1SizeChanges) { |
| 1219 InitCache(); |
| 1220 disk_cache::Entry* entry = NULL; |
| 1221 const char key[] = "the key"; |
| 1222 const int kSize = 100; |
| 1223 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize)); |
| 1224 scoped_refptr<net::IOBuffer> buffer_read(new net::IOBuffer(kSize)); |
| 1225 CacheTestFillBuffer(buffer->data(), kSize, false); |
| 1226 |
| 1227 ASSERT_EQ(net::OK, CreateEntry(key, &entry)); |
| 1228 EXPECT_TRUE(entry); |
| 1229 |
| 1230 // Write something into stream0. |
| 1231 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false)); |
| 1232 EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer_read.get(), kSize)); |
| 1233 EXPECT_EQ(0, memcmp(buffer->data(), buffer_read->data(), kSize)); |
| 1234 entry->Close(); |
| 1235 |
| 1236 // Extend stream1. |
| 1237 ASSERT_EQ(net::OK, OpenEntry(key, &entry)); |
| 1238 int stream1_size = 100; |
| 1239 EXPECT_EQ(0, WriteData(entry, 1, stream1_size, buffer.get(), 0, false)); |
| 1240 EXPECT_EQ(stream1_size, entry->GetDataSize(1)); |
| 1241 entry->Close(); |
| 1242 |
| 1243 // Check that stream0 data has not been modified and that the EOF record for |
| 1244 // stream 0 contains a crc. |
| 1245 // The entry needs to be reopened before checking the crc: Open will perform |
| 1246 // the synchronization with the previous Close. This ensures the EOF records |
| 1247 // have been written to disk before we attempt to read them independently. |
| 1248 ASSERT_EQ(net::OK, OpenEntry(key, &entry)); |
| 1249 base::FilePath entry_file0_path = cache_path().AppendASCII( |
| 1250 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0)); |
| 1251 int flags = base::PLATFORM_FILE_READ | base::PLATFORM_FILE_OPEN; |
| 1252 base::PlatformFile entry_file0 = |
| 1253 base::CreatePlatformFile(entry_file0_path, flags, NULL, NULL); |
| 1254 ASSERT_TRUE(entry_file0 != base::kInvalidPlatformFileValue); |
| 1255 |
| 1256 int data_size[disk_cache::kSimpleEntryStreamCount] = {kSize, stream1_size, 0}; |
| 1257 int sparse_data_size = 0; |
| 1258 disk_cache::SimpleEntryStat entry_stat( |
| 1259 base::Time::Now(), base::Time::Now(), data_size, sparse_data_size); |
| 1260 int eof_offset = entry_stat.GetEOFOffsetInFile(key, 0); |
| 1261 disk_cache::SimpleFileEOF eof_record; |
| 1262 ASSERT_EQ(static_cast<int>(sizeof(eof_record)), base::ReadPlatformFile( |
| 1263 entry_file0, |
| 1264 eof_offset, |
| 1265 reinterpret_cast<char*>(&eof_record), |
| 1266 sizeof(eof_record))); |
| 1267 EXPECT_EQ(disk_cache::kSimpleFinalMagicNumber, eof_record.final_magic_number); |
| 1268 EXPECT_TRUE((eof_record.flags & disk_cache::SimpleFileEOF::FLAG_HAS_CRC32) == |
| 1269 disk_cache::SimpleFileEOF::FLAG_HAS_CRC32); |
| 1270 |
| 1271 buffer_read = new net::IOBuffer(kSize); |
| 1272 EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer_read.get(), kSize)); |
| 1273 EXPECT_EQ(0, memcmp(buffer->data(), buffer_read->data(), kSize)); |
| 1274 |
| 1275 // Shrink stream1. |
| 1276 stream1_size = 50; |
| 1277 EXPECT_EQ(0, WriteData(entry, 1, stream1_size, buffer.get(), 0, true)); |
| 1278 EXPECT_EQ(stream1_size, entry->GetDataSize(1)); |
| 1279 entry->Close(); |
| 1280 |
| 1281 // Check that stream0 data has not been modified. |
| 1282 buffer_read = new net::IOBuffer(kSize); |
| 1283 ASSERT_EQ(net::OK, OpenEntry(key, &entry)); |
| 1284 EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer_read.get(), kSize)); |
| 1285 EXPECT_EQ(0, memcmp(buffer->data(), buffer_read->data(), kSize)); |
| 1286 entry->Close(); |
| 1287 entry = NULL; |
| 1288 } |
| 1289 |
// Test that writing within the range for which the crc has already been
// computed will properly invalidate the computed crc.
TEST_P(DiskCacheSimpleEntryTest, CRCRewrite) {
  // Test sequence:
  // Create, Write (big data), Write (small data in the middle), Close.
  // Open, Read (all), Close.
  InitCache();
  disk_cache::Entry* null = NULL;
  const char key[] = "the first key";

  const int kHalfSize = 200;
  const int kSize = 2 * kHalfSize;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kHalfSize));
  CacheTestFillBuffer(buffer1->data(), kSize, false);
  CacheTestFillBuffer(buffer2->data(), kHalfSize, false);

  disk_cache::Entry* entry = NULL;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_NE(null, entry);
  entry->Close();

  // Run the same scenario against every stream of the entry.
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    ASSERT_EQ(net::OK, OpenEntry(key, &entry));
    int offset = 0;
    int buf_len = kSize;

    // Full-size write, then overwrite the tail half with |buffer2|.
    EXPECT_EQ(buf_len,
              WriteData(entry, i, offset, buffer1.get(), buf_len, false));
    offset = kHalfSize;
    buf_len = kHalfSize;
    EXPECT_EQ(buf_len,
              WriteData(entry, i, offset, buffer2.get(), buf_len, false));
    entry->Close();

    ASSERT_EQ(net::OK, OpenEntry(key, &entry));

    // The read-back must reflect both writes.
    scoped_refptr<net::IOBuffer> buffer1_read1(new net::IOBuffer(kSize));
    EXPECT_EQ(kSize, ReadData(entry, i, 0, buffer1_read1.get(), kSize));
    EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read1->data(), kHalfSize));
    EXPECT_EQ(
        0,
        memcmp(buffer2->data(), buffer1_read1->data() + kHalfSize, kHalfSize));

    entry->Close();
  }
}
| 1337 |
| 1338 bool ThirdStreamFileExists(const base::FilePath& cache_path, |
| 1339 const char* key) { |
| 1340 int third_stream_file_index = |
| 1341 disk_cache::simple_util::GetFileIndexFromStreamIndex(2); |
| 1342 base::FilePath third_stream_file_path = cache_path.AppendASCII( |
| 1343 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex( |
| 1344 key, third_stream_file_index)); |
| 1345 return PathExists(third_stream_file_path); |
| 1346 } |
| 1347 |
| 1348 // Check that a newly-created entry with no third-stream writes omits the |
| 1349 // third stream file. |
| 1350 TEST_P(DiskCacheSimpleEntryTest, OmittedThirdStream1) { |
| 1351 InitCache(); |
| 1352 |
| 1353 const char key[] = "key"; |
| 1354 |
| 1355 disk_cache::Entry* entry; |
| 1356 |
| 1357 // Create entry and close without writing: third stream file should be |
| 1358 // omitted, since the stream is empty. |
| 1359 ASSERT_EQ(net::OK, CreateEntry(key, &entry)); |
| 1360 entry->Close(); |
| 1361 EXPECT_FALSE(ThirdStreamFileExists(cache_path(), key)); |
| 1362 |
| 1363 DoomEntry(key); |
| 1364 EXPECT_FALSE(ThirdStreamFileExists(cache_path(), key)); |
| 1365 } |
| 1366 |
| 1367 // Check that a newly-created entry with only a single zero-offset, zero-length |
| 1368 // write omits the third stream file. |
| 1369 TEST_P(DiskCacheSimpleEntryTest, OmittedThirdStream2) { |
| 1370 InitCache(); |
| 1371 |
| 1372 const int kHalfSize = 8; |
| 1373 const int kSize = kHalfSize * 2; |
| 1374 const char key[] = "key"; |
| 1375 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize)); |
| 1376 CacheTestFillBuffer(buffer->data(), kHalfSize, false); |
| 1377 |
| 1378 disk_cache::Entry* entry; |
| 1379 |
| 1380 // Create entry, write empty buffer to third stream, and close: third stream |
| 1381 // should still be omitted, since the entry ignores writes that don't modify |
| 1382 // data or change the length. |
| 1383 ASSERT_EQ(net::OK, CreateEntry(key, &entry)); |
| 1384 EXPECT_EQ(0, WriteData(entry, 2, 0, buffer, 0, true)); |
| 1385 entry->Close(); |
| 1386 EXPECT_FALSE(ThirdStreamFileExists(cache_path(), key)); |
| 1387 |
| 1388 DoomEntry(key); |
| 1389 EXPECT_FALSE(ThirdStreamFileExists(cache_path(), key)); |
| 1390 } |
| 1391 |
| 1392 // Check that we can read back data written to the third stream. |
| 1393 TEST_P(DiskCacheSimpleEntryTest, OmittedThirdStream3) { |
| 1394 InitCache(); |
| 1395 |
| 1396 const int kHalfSize = 8; |
| 1397 const int kSize = kHalfSize * 2; |
| 1398 const char key[] = "key"; |
| 1399 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize)); |
| 1400 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize)); |
| 1401 CacheTestFillBuffer(buffer1->data(), kHalfSize, false); |
| 1402 |
| 1403 disk_cache::Entry* entry; |
| 1404 |
| 1405 // Create entry, write data to third stream, and close: third stream should |
| 1406 // not be omitted, since it contains data. Re-open entry and ensure there |
| 1407 // are that many bytes in the third stream. |
| 1408 ASSERT_EQ(net::OK, CreateEntry(key, &entry)); |
| 1409 EXPECT_EQ(kHalfSize, WriteData(entry, 2, 0, buffer1, kHalfSize, true)); |
| 1410 entry->Close(); |
| 1411 EXPECT_TRUE(ThirdStreamFileExists(cache_path(), key)); |
| 1412 |
| 1413 ASSERT_EQ(net::OK, OpenEntry(key, &entry)); |
| 1414 EXPECT_EQ(kHalfSize, ReadData(entry, 2, 0, buffer2, kSize)); |
| 1415 EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kHalfSize)); |
| 1416 entry->Close(); |
| 1417 EXPECT_TRUE(ThirdStreamFileExists(cache_path(), key)); |
| 1418 |
| 1419 DoomEntry(key); |
| 1420 EXPECT_FALSE(ThirdStreamFileExists(cache_path(), key)); |
| 1421 } |
| 1422 |
| 1423 // Check that we remove the third stream file upon opening an entry and finding |
| 1424 // the third stream empty. (This is the upgrade path for entries written |
| 1425 // before the third stream was optional.) |
| 1426 TEST_P(DiskCacheSimpleEntryTest, OmittedThirdStream4) { |
| 1427 InitCache(); |
| 1428 |
| 1429 const int kHalfSize = 8; |
| 1430 const int kSize = kHalfSize * 2; |
| 1431 const char key[] = "key"; |
| 1432 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize)); |
| 1433 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize)); |
| 1434 CacheTestFillBuffer(buffer1->data(), kHalfSize, false); |
| 1435 |
| 1436 disk_cache::Entry* entry; |
| 1437 |
| 1438 // Create entry, write data to third stream, truncate third stream back to |
| 1439 // empty, and close: third stream will not initially be omitted, since entry |
| 1440 // creates the file when the first significant write comes in, and only |
| 1441 // removes it on open if it is empty. Reopen, ensure that the file is |
| 1442 // deleted, and that there's no data in the third stream. |
| 1443 ASSERT_EQ(net::OK, CreateEntry(key, &entry)); |
| 1444 EXPECT_EQ(kHalfSize, WriteData(entry, 2, 0, buffer1, kHalfSize, true)); |
| 1445 EXPECT_EQ(0, WriteData(entry, 2, 0, buffer1, 0, true)); |
| 1446 entry->Close(); |
| 1447 EXPECT_TRUE(ThirdStreamFileExists(cache_path(), key)); |
| 1448 |
| 1449 ASSERT_EQ(net::OK, OpenEntry(key, &entry)); |
| 1450 EXPECT_FALSE(ThirdStreamFileExists(cache_path(), key)); |
| 1451 EXPECT_EQ(0, ReadData(entry, 2, 0, buffer2, kSize)); |
| 1452 entry->Close(); |
| 1453 EXPECT_FALSE(ThirdStreamFileExists(cache_path(), key)); |
| 1454 |
| 1455 DoomEntry(key); |
| 1456 EXPECT_FALSE(ThirdStreamFileExists(cache_path(), key)); |
| 1457 } |
| 1458 |
| 1459 // Check that we don't accidentally create the third stream file once the entry |
| 1460 // has been doomed. |
| 1461 TEST_P(DiskCacheSimpleEntryTest, OmittedThirdStream5) { |
| 1462 InitCache(); |
| 1463 |
| 1464 const int kHalfSize = 8; |
| 1465 const int kSize = kHalfSize * 2; |
| 1466 const char key[] = "key"; |
| 1467 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize)); |
| 1468 CacheTestFillBuffer(buffer->data(), kHalfSize, false); |
| 1469 |
| 1470 disk_cache::Entry* entry; |
| 1471 |
| 1472 // Create entry, doom entry, write data to third stream, and close: third |
| 1473 // stream should not exist. (Note: We don't care if the write fails, just |
| 1474 // that it doesn't cause the file to be created on disk.) |
| 1475 ASSERT_EQ(net::OK, CreateEntry(key, &entry)); |
| 1476 entry->Doom(); |
| 1477 WriteData(entry, 2, 0, buffer, kHalfSize, true); |
| 1478 entry->Close(); |
| 1479 EXPECT_FALSE(ThirdStreamFileExists(cache_path(), key)); |
| 1480 } |
| 1481 |
// There could be a race between Doom and an optimistic write.
TEST_P(DiskCacheSimpleEntryTest, DoomOptimisticWritesRace) {
  // Test sequence:
  // Create, first Write, second Write, Close.
  // Open, Close.
  InitCache();
  disk_cache::Entry* null = NULL;
  const char key[] = "the first key";

  const int kSize = 200;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer1->data(), kSize, false);
  CacheTestFillBuffer(buffer2->data(), kSize, false);

  // The race only happens on stream 1 and stream 2.
  // NOTE(review): the loop below nevertheless starts at stream 0 —
  // presumably intentional to also cover the non-racy stream; confirm.
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    // Start each iteration from an empty cache.
    ASSERT_EQ(net::OK, DoomAllEntries());
    disk_cache::Entry* entry = NULL;

    ASSERT_EQ(net::OK, CreateEntry(key, &entry));
    EXPECT_NE(null, entry);
    entry->Close();
    entry = NULL;

    // Doom everything again, then re-create the entry and write to it.
    ASSERT_EQ(net::OK, DoomAllEntries());
    ASSERT_EQ(net::OK, CreateEntry(key, &entry));
    EXPECT_NE(null, entry);

    int offset = 0;
    int buf_len = kSize;
    // This write should not be optimistic (since create is).
    EXPECT_EQ(buf_len,
              WriteData(entry, i, offset, buffer1.get(), buf_len, false));

    offset = kSize;
    // This write should be optimistic.
    EXPECT_EQ(buf_len,
              WriteData(entry, i, offset, buffer2.get(), buf_len, false));
    entry->Close();

    // The entry must survive the doom/create/write shuffle and reopen fine.
    ASSERT_EQ(net::OK, OpenEntry(key, &entry));
    EXPECT_NE(null, entry);

    entry->Close();
    entry = NULL;
  }
}
| 1530 |
| 1531 TEST_P(DiskCacheSimpleEntryTest, TruncateLargeSparseFile) { |
| 1532 const int kSize = 1024; |
| 1533 |
| 1534 // An entry is allowed sparse data 1/10 the size of the cache, so this size |
| 1535 // allows for one |kSize|-sized range plus overhead, but not two ranges. |
| 1536 SetMaxSize(kSize * 15); |
| 1537 InitCache(); |
| 1538 |
| 1539 const char key[] = "key"; |
| 1540 disk_cache::Entry* null = NULL; |
| 1541 disk_cache::Entry* entry; |
| 1542 ASSERT_EQ(net::OK, CreateEntry(key, &entry)); |
| 1543 EXPECT_NE(null, entry); |
| 1544 |
| 1545 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize)); |
| 1546 CacheTestFillBuffer(buffer->data(), kSize, false); |
| 1547 net::TestCompletionCallback callback; |
| 1548 int ret; |
| 1549 |
| 1550 // Verify initial conditions. |
| 1551 ret = entry->ReadSparseData(0, buffer, kSize, callback.callback()); |
| 1552 EXPECT_EQ(0, callback.GetResult(ret)); |
| 1553 |
| 1554 ret = entry->ReadSparseData(kSize, buffer, kSize, callback.callback()); |
| 1555 EXPECT_EQ(0, callback.GetResult(ret)); |
| 1556 |
| 1557 // Write a range and make sure it reads back. |
| 1558 ret = entry->WriteSparseData(0, buffer, kSize, callback.callback()); |
| 1559 EXPECT_EQ(kSize, callback.GetResult(ret)); |
| 1560 |
| 1561 ret = entry->ReadSparseData(0, buffer, kSize, callback.callback()); |
| 1562 EXPECT_EQ(kSize, callback.GetResult(ret)); |
| 1563 |
| 1564 // Write another range and make sure it reads back. |
| 1565 ret = entry->WriteSparseData(kSize, buffer, kSize, callback.callback()); |
| 1566 EXPECT_EQ(kSize, callback.GetResult(ret)); |
| 1567 |
| 1568 ret = entry->ReadSparseData(kSize, buffer, kSize, callback.callback()); |
| 1569 EXPECT_EQ(kSize, callback.GetResult(ret)); |
| 1570 |
| 1571 // Make sure the first range was removed when the second was written. |
| 1572 ret = entry->ReadSparseData(0, buffer, kSize, callback.callback()); |
| 1573 EXPECT_EQ(0, callback.GetResult(ret)); |
| 1574 |
| 1575 entry->Close(); |
| 1576 } |
| 1577 |
| 1578 TEST_P(DiskCacheSimpleBackendTest, SimpleCacheOpenMissingFile) { |
| 1579 InitCache(); |
| 1580 |
| 1581 const char* key = "the first key"; |
| 1582 disk_cache::Entry* entry = NULL; |
| 1583 |
| 1584 ASSERT_EQ(net::OK, CreateEntry(key, &entry)); |
| 1585 ASSERT_TRUE(entry != NULL); |
| 1586 entry->Close(); |
| 1587 entry = NULL; |
| 1588 |
| 1589 // To make sure the file creation completed we need to call open again so that |
| 1590 // we block until it actually created the files. |
| 1591 ASSERT_EQ(net::OK, OpenEntry(key, &entry)); |
| 1592 ASSERT_TRUE(entry != NULL); |
| 1593 entry->Close(); |
| 1594 entry = NULL; |
| 1595 |
| 1596 // Delete one of the files in the entry. |
| 1597 base::FilePath to_delete_file = cache_path().AppendASCII( |
| 1598 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0)); |
| 1599 EXPECT_TRUE(base::PathExists(to_delete_file)); |
| 1600 EXPECT_TRUE(disk_cache::DeleteCacheFile(to_delete_file)); |
| 1601 |
| 1602 // Failing to open the entry should delete the rest of these files. |
| 1603 ASSERT_EQ(net::ERR_FAILED, OpenEntry(key, &entry)); |
| 1604 |
| 1605 // Confirm the rest of the files are gone. |
| 1606 for (int i = 1; i < disk_cache::kSimpleEntryFileCount; ++i) { |
| 1607 base::FilePath should_be_gone_file(cache_path().AppendASCII( |
| 1608 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, i))); |
| 1609 EXPECT_FALSE(base::PathExists(should_be_gone_file)); |
| 1610 } |
| 1611 } |
| 1612 |
| 1613 TEST_P(DiskCacheSimpleBackendTest, SimpleCacheOpenBadFile) { |
| 1614 InitCache(); |
| 1615 |
| 1616 const char* key = "the first key"; |
| 1617 disk_cache::Entry* entry = NULL; |
| 1618 |
| 1619 ASSERT_EQ(net::OK, CreateEntry(key, &entry)); |
| 1620 disk_cache::Entry* null = NULL; |
| 1621 ASSERT_NE(null, entry); |
| 1622 entry->Close(); |
| 1623 entry = NULL; |
| 1624 |
| 1625 // To make sure the file creation completed we need to call open again so that |
| 1626 // we block until it actually created the files. |
| 1627 ASSERT_EQ(net::OK, OpenEntry(key, &entry)); |
| 1628 ASSERT_NE(null, entry); |
| 1629 entry->Close(); |
| 1630 entry = NULL; |
| 1631 |
| 1632 // Write an invalid header for stream 0 and stream 1. |
| 1633 base::FilePath entry_file1_path = cache_path().AppendASCII( |
| 1634 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0)); |
| 1635 |
| 1636 disk_cache::SimpleFileHeader header; |
| 1637 header.initial_magic_number = GG_UINT64_C(0xbadf00d); |
| 1638 EXPECT_EQ( |
| 1639 implicit_cast<int>(sizeof(header)), |
| 1640 file_util::WriteFile(entry_file1_path, reinterpret_cast<char*>(&header), |
| 1641 sizeof(header))); |
| 1642 ASSERT_EQ(net::ERR_FAILED, OpenEntry(key, &entry)); |
| 1643 } |
| 1644 |
| 1645 // Creates entries based on random keys. Stores these keys in |key_pool|. |
| 1646 bool CreateSetOfRandomEntries(DiskCacheTest* test, |
| 1647 std::set<std::string>* key_pool) { |
| 1648 const int kNumEntries = 10; |
| 1649 |
| 1650 for (int i = 0; i < kNumEntries; ++i) { |
| 1651 std::string key = GenerateKey(true); |
| 1652 disk_cache::Entry* entry; |
| 1653 if (test->CreateEntry(key, &entry) != net::OK) |
| 1654 return false; |
| 1655 key_pool->insert(key); |
| 1656 entry->Close(); |
| 1657 } |
| 1658 |
| 1659 return |
| 1660 key_pool->size() == implicit_cast<size_t>(test->cache()->GetEntryCount()); |
| 1661 } |
| 1662 |
| 1663 // Performs iteration over the backend and checks that the keys of entries |
| 1664 // opened are in |keys_to_match|, then erases them. Up to |max_to_open| entries |
| 1665 // will be opened, if it is positive. Otherwise, iteration will continue until |
| 1666 // OpenNextEntry stops returning net::OK. |
| 1667 bool EnumerateAndMatchKeys(DiskCacheTest* test, |
| 1668 int max_to_open, |
| 1669 void** iter, |
| 1670 std::set<std::string>* keys_to_match, |
| 1671 size_t* count) { |
| 1672 Entry* entry; |
| 1673 |
| 1674 while (test->OpenNextEntry(iter, &entry) == net::OK) { |
| 1675 if (!entry) |
| 1676 return false; |
| 1677 EXPECT_EQ(1U, keys_to_match->erase(entry->GetKey())); |
| 1678 entry->Close(); |
| 1679 ++(*count); |
| 1680 if (max_to_open >= 0 && implicit_cast<int>(*count) >= max_to_open) |
| 1681 break; |
| 1682 }; |
| 1683 |
| 1684 return true; |
| 1685 } |
| 1686 |
| 1687 // Tests basic functionality of the SimpleBackend implementation of the |
| 1688 // enumeration API. |
| 1689 TEST_P(DiskCacheSimpleBackendTest, EnumerationBasics) { |
| 1690 InitCache(); |
| 1691 std::set<std::string> key_pool; |
| 1692 ASSERT_TRUE(CreateSetOfRandomEntries(this, &key_pool)); |
| 1693 |
| 1694 // Check that enumeration returns all entries. |
| 1695 std::set<std::string> keys_to_match(key_pool); |
| 1696 void* iter = NULL; |
| 1697 size_t count = 0; |
| 1698 ASSERT_TRUE(EnumerateAndMatchKeys(this, -1, &iter, &keys_to_match, &count)); |
| 1699 cache()->EndEnumeration(&iter); |
| 1700 EXPECT_EQ(key_pool.size(), count); |
| 1701 EXPECT_TRUE(keys_to_match.empty()); |
| 1702 |
| 1703 // Check that opening entries does not affect enumeration. |
| 1704 keys_to_match = key_pool; |
| 1705 iter = NULL; |
| 1706 count = 0; |
| 1707 disk_cache::Entry* entry_opened_before; |
| 1708 ASSERT_EQ(net::OK, OpenEntry(*(key_pool.begin()), &entry_opened_before)); |
| 1709 ASSERT_TRUE(EnumerateAndMatchKeys(this, key_pool.size()/2, |
| 1710 &iter, |
| 1711 &keys_to_match, |
| 1712 &count)); |
| 1713 |
| 1714 disk_cache::Entry* entry_opened_middle; |
| 1715 ASSERT_EQ(net::OK, |
| 1716 OpenEntry(*(keys_to_match.begin()), &entry_opened_middle)); |
| 1717 ASSERT_TRUE(EnumerateAndMatchKeys(this, -1, &iter, &keys_to_match, &count)); |
| 1718 cache()->EndEnumeration(&iter); |
| 1719 entry_opened_before->Close(); |
| 1720 entry_opened_middle->Close(); |
| 1721 |
| 1722 EXPECT_EQ(key_pool.size(), count); |
| 1723 EXPECT_TRUE(keys_to_match.empty()); |
| 1724 } |
| 1725 |
| 1726 // Tests that the enumerations are not affected by dooming an entry in the |
| 1727 // middle. |
| 1728 TEST_P(DiskCacheSimpleBackendTest, EnumerationWhileDoomed) { |
| 1729 InitCache(); |
| 1730 std::set<std::string> key_pool; |
| 1731 ASSERT_TRUE(CreateSetOfRandomEntries(this, &key_pool)); |
| 1732 |
| 1733 // Check that enumeration returns all entries but the doomed one. |
| 1734 std::set<std::string> keys_to_match(key_pool); |
| 1735 void* iter = NULL; |
| 1736 size_t count = 0; |
| 1737 ASSERT_TRUE(EnumerateAndMatchKeys(this, key_pool.size()/2, |
| 1738 &iter, |
| 1739 &keys_to_match, |
| 1740 &count)); |
| 1741 |
| 1742 std::string key_to_delete = *(keys_to_match.begin()); |
| 1743 DoomEntry(key_to_delete); |
| 1744 keys_to_match.erase(key_to_delete); |
| 1745 key_pool.erase(key_to_delete); |
| 1746 ASSERT_TRUE(EnumerateAndMatchKeys(this, -1, &iter, &keys_to_match, &count)); |
| 1747 cache()->EndEnumeration(&iter); |
| 1748 |
| 1749 EXPECT_EQ(key_pool.size(), count); |
| 1750 EXPECT_TRUE(keys_to_match.empty()); |
| 1751 } |
| 1752 |
| 1753 // Tests that enumerations are not affected by corrupt files. |
| 1754 TEST_P(DiskCacheSimpleBackendTest, EnumerationCorruption) { |
| 1755 InitCache(); |
| 1756 std::set<std::string> key_pool; |
| 1757 ASSERT_TRUE(CreateSetOfRandomEntries(this, &key_pool)); |
| 1758 |
| 1759 // Create a corrupt entry. The write/read sequence ensures that the entry will |
| 1760 // have been created before corrupting the platform files, in the case of |
| 1761 // optimistic operations. |
| 1762 const std::string key = "the key"; |
| 1763 disk_cache::Entry* corrupted_entry; |
| 1764 |
| 1765 ASSERT_EQ(net::OK, CreateEntry(key, &corrupted_entry)); |
| 1766 ASSERT_TRUE(corrupted_entry); |
| 1767 const int kSize = 50; |
| 1768 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize)); |
| 1769 CacheTestFillBuffer(buffer->data(), kSize, false); |
| 1770 ASSERT_EQ(kSize, |
| 1771 WriteData(corrupted_entry, 0, 0, buffer.get(), kSize, false)); |
| 1772 ASSERT_EQ(kSize, ReadData(corrupted_entry, 0, 0, buffer.get(), kSize)); |
| 1773 corrupted_entry->Close(); |
| 1774 |
| 1775 EXPECT_TRUE(simple_util::CreateCorruptFileForTests(key, cache_path())); |
| 1776 EXPECT_EQ(key_pool.size() + 1, |
| 1777 implicit_cast<size_t>(cache()->GetEntryCount())); |
| 1778 |
| 1779 // Check that enumeration returns all entries but the corrupt one. |
| 1780 std::set<std::string> keys_to_match(key_pool); |
| 1781 void* iter = NULL; |
| 1782 size_t count = 0; |
| 1783 ASSERT_TRUE(EnumerateAndMatchKeys(this, -1, &iter, &keys_to_match, &count)); |
| 1784 cache()->EndEnumeration(&iter); |
| 1785 |
| 1786 EXPECT_EQ(key_pool.size(), count); |
| 1787 EXPECT_TRUE(keys_to_match.empty()); |
| 1788 } |
| 1789 |
| 1790 } // namespace |
| 1791 |
| 1792 } // namespace disk_cache |
OLD | NEW |